Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 138
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 52
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 22
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 14
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 162
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 189
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 133
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_asic_id.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 23
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 19
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 30
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c | 26
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c | 28
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 224
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c | 17
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c | 6
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c | 1
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 68
-rw-r--r--  drivers/gpu/drm/drm_file.c | 22
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 6
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 22
-rw-r--r--  drivers/gpu/drm/etnaviv/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_dump.c | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_perfmon.c | 6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_sched.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 35
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_display.h | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 12
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c | 69
-rw-r--r--  drivers/gpu/drm/imx/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 7
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 30
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 76
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.h | 2
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 20
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 9
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 9
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 32
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 13
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | 37
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h | 14
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 19
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h | 220
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 44
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h | 44
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 7
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 65
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c | 36
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 51
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | 27
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 325
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 28
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | 28
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 126
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 10
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dmem.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c | 39
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 8
-rw-r--r--  drivers/gpu/ipu-v3/ipu-cpmem.c | 26
-rw-r--r--  drivers/gpu/ipu-v3/ipu-csi.c | 126
-rw-r--r--  drivers/gpu/ipu-v3/ipu-pre.c | 6
-rw-r--r--  drivers/gpu/ipu-v3/ipu-prg.c | 16
-rw-r--r--  drivers/gpu/ipu-v3/ipu-prv.h | 1
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 49
157 files changed, 1814 insertions(+), 1996 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 9efa681d0878..8d0d7f3dd5fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -411,6 +411,8 @@ struct amdgpu_fpriv {
struct amdgpu_ctx_mgr ctx_mgr;
};
+int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
+
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index e957e42c539a..fe1d7368c1e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -131,7 +131,7 @@ static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
- int i, n;
+ int i;
int last_valid_bit;
if (adev->kfd.dev) {
@@ -142,7 +142,9 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
.gpuvm_size = min(adev->vm_manager.max_pfn
<< AMDGPU_GPU_PAGE_SHIFT,
AMDGPU_GMC_HOLE_START),
- .drm_render_minor = adev->ddev->render->index
+ .drm_render_minor = adev->ddev->render->index,
+ .sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
+
};
/* this is going to have a few of the MSBs set that we need to
@@ -172,35 +174,20 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
&gpu_resources.doorbell_aperture_size,
&gpu_resources.doorbell_start_offset);
- if (adev->asic_type < CHIP_VEGA10) {
- kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
- return;
- }
-
- n = (adev->asic_type < CHIP_VEGA20) ? 2 : 8;
-
- for (i = 0; i < n; i += 2) {
- /* On SOC15 the BIF is involved in routing
- * doorbells using the low 12 bits of the
- * address. Communicate the assignments to
- * KFD. KFD uses two doorbell pages per
- * process in case of 64-bit doorbells so we
- * can use each doorbell assignment twice.
- */
- gpu_resources.sdma_doorbell[0][i] =
- adev->doorbell_index.sdma_engine[0] + (i >> 1);
- gpu_resources.sdma_doorbell[0][i+1] =
- adev->doorbell_index.sdma_engine[0] + 0x200 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i] =
- adev->doorbell_index.sdma_engine[1] + (i >> 1);
- gpu_resources.sdma_doorbell[1][i+1] =
- adev->doorbell_index.sdma_engine[1] + 0x200 + (i >> 1);
- }
- /* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for
- * SDMA, IH and VCN. So don't use them for the CP.
+ /* Since SOC15, the BIF statically uses the
+ * lower 12 bits of doorbell addresses for routing,
+ * based on settings in registers like
+ * SDMA0_DOORBELL_RANGE etc.
+ * In order to route a doorbell to the CP engine, the lower
+ * 12 bits of its address have to be outside the range
+ * set for the SDMA, VCN, and IH blocks.
+ */
- gpu_resources.reserved_doorbell_mask = 0x1e0;
- gpu_resources.reserved_doorbell_val = 0x0e0;
+ if (adev->asic_type >= CHIP_VEGA10) {
+ gpu_resources.non_cp_doorbells_start =
+ adev->doorbell_index.first_non_cp;
+ gpu_resources.non_cp_doorbells_end =
+ adev->doorbell_index.last_non_cp;
+ }
kgd2kfd_device_init(adev->kfd.dev, &gpu_resources);
}
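
For illustration, a minimal sketch of the check this range change implies, using only the two fields added above; the helper name and placement are hypothetical, not part of this patch:

	/* A doorbell index can be handed to the CP only if it falls outside
	 * the non-CP (SDMA/VCN/IH) range the driver reports to KFD.
	 */
	static bool doorbell_usable_by_cp(uint32_t index,
					  const struct kgd2kfd_shared_resources *res)
	{
		return index < res->non_cp_doorbells_start ||
		       index > res->non_cp_doorbells_end;
	}
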
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index d7b10d79f1de..1921dec3df7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -204,38 +204,25 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
}
-/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence(s) from BO's
+/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
* reservation object.
*
* @bo: [IN] Remove eviction fence(s) from this BO
- * @ef: [IN] If ef is specified, then this eviction fence is removed if it
+ * @ef: [IN] This eviction fence is removed if it
* is present in the shared list.
- * @ef_list: [OUT] Returns list of eviction fences. These fences are removed
- * from BO's reservation object shared list.
- * @ef_count: [OUT] Number of fences in ef_list.
*
- * NOTE: If called with ef_list, then amdgpu_amdkfd_add_eviction_fence must be
- * called to restore the eviction fences and to avoid memory leak. This is
- * useful for shared BOs.
* NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
*/
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
- struct amdgpu_amdkfd_fence *ef,
- struct amdgpu_amdkfd_fence ***ef_list,
- unsigned int *ef_count)
+ struct amdgpu_amdkfd_fence *ef)
{
struct reservation_object *resv = bo->tbo.resv;
struct reservation_object_list *old, *new;
unsigned int i, j, k;
- if (!ef && !ef_list)
+ if (!ef)
return -EINVAL;
- if (ef_list) {
- *ef_list = NULL;
- *ef_count = 0;
- }
-
old = reservation_object_get_list(resv);
if (!old)
return 0;
@@ -254,8 +241,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
f = rcu_dereference_protected(old->shared[i],
reservation_object_held(resv));
- if ((ef && f->context == ef->base.context) ||
- (!ef && to_amdgpu_amdkfd_fence(f)))
+ if (f->context == ef->base.context)
RCU_INIT_POINTER(new->shared[--j], f);
else
RCU_INIT_POINTER(new->shared[k++], f);
@@ -263,21 +249,6 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
new->shared_max = old->shared_max;
new->shared_count = k;
- if (!ef) {
- unsigned int count = old->shared_count - j;
-
- /* Alloc memory for count number of eviction fence pointers.
- * Fill the ef_list array and ef_count
- */
- *ef_list = kcalloc(count, sizeof(**ef_list), GFP_KERNEL);
- *ef_count = count;
-
- if (!*ef_list) {
- kfree(new);
- return -ENOMEM;
- }
- }
-
/* Install the new fence list, seqcount provides the barriers */
preempt_disable();
write_seqcount_begin(&resv->seq);
@@ -291,46 +262,13 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
f = rcu_dereference_protected(new->shared[i],
reservation_object_held(resv));
- if (!ef)
- (*ef_list)[k++] = to_amdgpu_amdkfd_fence(f);
- else
- dma_fence_put(f);
+ dma_fence_put(f);
}
kfree_rcu(old, rcu);
return 0;
}
-/* amdgpu_amdkfd_add_eviction_fence - Adds eviction fence(s) back into BO's
- * reservation object.
- *
- * @bo: [IN] Add eviction fences to this BO
- * @ef_list: [IN] List of eviction fences to be added
- * @ef_count: [IN] Number of fences in ef_list.
- *
- * NOTE: Must call amdgpu_amdkfd_remove_eviction_fence before calling this
- * function.
- */
-static void amdgpu_amdkfd_add_eviction_fence(struct amdgpu_bo *bo,
- struct amdgpu_amdkfd_fence **ef_list,
- unsigned int ef_count)
-{
- int i;
-
- if (!ef_list || !ef_count)
- return;
-
- for (i = 0; i < ef_count; i++) {
- amdgpu_bo_fence(bo, &ef_list[i]->base, true);
- /* Re-adding the fence takes an additional reference. Drop that
- * reference.
- */
- dma_fence_put(&ef_list[i]->base);
- }
-
- kfree(ef_list);
-}
-
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
bool wait)
{
@@ -346,18 +284,8 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
goto validate_fail;
- if (wait) {
- struct amdgpu_amdkfd_fence **ef_list;
- unsigned int ef_count;
-
- ret = amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list,
- &ef_count);
- if (ret)
- goto validate_fail;
-
- ttm_bo_wait(&bo->tbo, false, false);
- amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
- }
+ if (wait)
+ amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
validate_fail:
return ret;
@@ -444,7 +372,6 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
{
int ret;
struct kfd_bo_va_list *bo_va_entry;
- struct amdgpu_bo *pd = vm->root.base.bo;
struct amdgpu_bo *bo = mem->bo;
uint64_t va = mem->va;
struct list_head *list_bo_va = &mem->bo_va_list;
@@ -484,14 +411,8 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
*p_bo_va_entry = bo_va_entry;
/* Allocate new page tables if needed and validate
- * them. Clearing of new page tables and validate need to wait
- * on move fences. We don't want that to trigger the eviction
- * fence, so remove it temporarily.
+ * them.
*/
- amdgpu_amdkfd_remove_eviction_fence(pd,
- vm->process_info->eviction_fence,
- NULL, NULL);
-
ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
if (ret) {
pr_err("Failed to allocate pts, err=%d\n", ret);
@@ -504,13 +425,9 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
goto err_alloc_pts;
}
- /* Add the eviction fence back */
- amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
-
return 0;
err_alloc_pts:
- amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
list_del(&bo_va_entry->bo_list);
err_vmadd:
@@ -809,24 +726,11 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
{
struct amdgpu_bo_va *bo_va = entry->bo_va;
struct amdgpu_vm *vm = bo_va->base.vm;
- struct amdgpu_bo *pd = vm->root.base.bo;
- /* Remove eviction fence from PD (and thereby from PTs too as
- * they share the resv. object). Otherwise during PT update
- * job (see amdgpu_vm_bo_update_mapping), eviction fence would
- * get added to job->sync object and job execution would
- * trigger the eviction fence.
- */
- amdgpu_amdkfd_remove_eviction_fence(pd,
- vm->process_info->eviction_fence,
- NULL, NULL);
amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
- /* Add the eviction fence back */
- amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
-
amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
return 0;
@@ -1002,7 +906,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
pr_err("validate_pt_pd_bos() failed\n");
goto validate_pd_fail;
}
- ret = ttm_bo_wait(&vm->root.base.bo->tbo, false, false);
+ ret = amdgpu_bo_sync_wait(vm->root.base.bo, AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
goto wait_pd_fail;
amdgpu_bo_fence(vm->root.base.bo,
@@ -1389,8 +1293,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
* attached
*/
amdgpu_amdkfd_remove_eviction_fence(mem->bo,
- process_info->eviction_fence,
- NULL, NULL);
+ process_info->eviction_fence);
pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
mem->va + bo_size * (1 + mem->aql_queue));
@@ -1617,8 +1520,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
if (mem->mapped_to_gpu_memory == 0 &&
!amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
amdgpu_amdkfd_remove_eviction_fence(mem->bo,
- process_info->eviction_fence,
- NULL, NULL);
+ process_info->eviction_fence);
unreserve_out:
unreserve_bo_and_vms(&ctx, false, false);
@@ -1679,7 +1581,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
}
amdgpu_amdkfd_remove_eviction_fence(
- bo, mem->process_info->eviction_fence, NULL, NULL);
+ bo, mem->process_info->eviction_fence);
list_del_init(&mem->validate_list.head);
if (size)
@@ -1945,16 +1847,6 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
amdgpu_sync_create(&sync);
- /* Avoid triggering eviction fences when unmapping invalid
- * userptr BOs (waits for all fences, doesn't use
- * FENCE_OWNER_VM)
- */
- list_for_each_entry(peer_vm, &process_info->vm_list_head,
- vm_list_node)
- amdgpu_amdkfd_remove_eviction_fence(peer_vm->root.base.bo,
- process_info->eviction_fence,
- NULL, NULL);
-
ret = process_validate_vms(process_info);
if (ret)
goto unreserve_out;
@@ -2015,10 +1907,6 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
ret = process_update_pds(process_info, &sync);
unreserve_out:
- list_for_each_entry(peer_vm, &process_info->vm_list_head,
- vm_list_node)
- amdgpu_bo_fence(peer_vm->root.base.bo,
- &process_info->eviction_fence->base, true);
ttm_eu_backoff_reservation(&ticket, &resv_list);
amdgpu_sync_wait(&sync, false);
amdgpu_sync_free(&sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index d85184b5b35c..7b526593eb77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -124,6 +124,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
unsigned num_rings;
+ unsigned num_rqs = 0;
switch (i) {
case AMDGPU_HW_IP_GFX:
@@ -166,12 +167,16 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
break;
}
- for (j = 0; j < num_rings; ++j)
- rqs[j] = &rings[j]->sched.sched_rq[priority];
+ for (j = 0; j < num_rings; ++j) {
+ if (!rings[j]->adev)
+ continue;
+
+ rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
+ }
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
r = drm_sched_entity_init(&ctx->entities[i][j].entity,
- rqs, num_rings, &ctx->guilty);
+ rqs, num_rqs, &ctx->guilty);
if (r)
goto error_cleanup_entities;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index dd9a4fb9ce39..4ae3ff9a1d4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -158,9 +158,6 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
while (size) {
uint32_t value;
- if (*pos > adev->rmmio_size)
- goto end;
-
if (read) {
value = RREG32(*pos >> 2);
r = put_user(value, (uint32_t *)buf);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
index 1cfec06f81d4..68959b923f89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
@@ -71,6 +71,8 @@ struct amdgpu_doorbell_index {
uint32_t vce_ring6_7;
} uvd_vce;
};
+ uint32_t first_non_cp;
+ uint32_t last_non_cp;
uint32_t max_assignment;
/* Per engine SDMA doorbell size in dword */
uint32_t sdma_doorbell_range;
@@ -143,6 +145,10 @@ typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT
AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3 = 0x18D,
AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5 = 0x18E,
AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7 = 0x18F,
+
+ AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0,
+ AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7,
+
AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x18F,
AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF
} AMDGPU_VEGA20_DOORBELL_ASSIGNMENT;
@@ -222,6 +228,9 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
+ AMDGPU_DOORBELL64_FIRST_NON_CP = AMDGPU_DOORBELL64_sDMA_ENGINE0,
+ AMDGPU_DOORBELL64_LAST_NON_CP = AMDGPU_DOORBELL64_VCE_RING6_7,
+
AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
AMDGPU_DOORBELL64_INVALID = 0xFFFF
} AMDGPU_DOORBELL64_ASSIGNMENT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 1c4595562f8f..344967df3137 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -184,61 +184,6 @@ u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
return vrefresh;
}
-void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
- u32 *p, u32 *u)
-{
- u32 b_c = 0;
- u32 i_c;
- u32 tmp;
-
- i_c = (i * r_c) / 100;
- tmp = i_c >> p_b;
-
- while (tmp) {
- b_c++;
- tmp >>= 1;
- }
-
- *u = (b_c + 1) / 2;
- *p = i_c / (1 << (2 * (*u)));
-}
-
-int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
-{
- u32 k, a, ah, al;
- u32 t1;
-
- if ((fl == 0) || (fh == 0) || (fl > fh))
- return -EINVAL;
-
- k = (100 * fh) / fl;
- t1 = (t * (k - 100));
- a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
- a = (a + 5) / 10;
- ah = ((a * t) + 5000) / 10000;
- al = a - ah;
-
- *th = t - ah;
- *tl = t + al;
-
- return 0;
-}
-
-bool amdgpu_is_uvd_state(u32 class, u32 class2)
-{
- if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- return true;
- if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- return true;
- if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- return true;
- if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- return true;
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- return true;
- return false;
-}
-
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
switch (sensor) {
@@ -949,39 +894,6 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
return AMDGPU_PCIE_GEN1;
}
-u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
- u16 asic_lanes,
- u16 default_lanes)
-{
- switch (asic_lanes) {
- case 0:
- default:
- return default_lanes;
- case 1:
- return 1;
- case 2:
- return 2;
- case 4:
- return 4;
- case 8:
- return 8;
- case 12:
- return 12;
- case 16:
- return 16;
- }
-}
-
-u8 amdgpu_encode_pci_lane_width(u32 lanes)
-{
- u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
-
- if (lanes > 16)
- return 0;
-
- return encoded_lanes[lanes];
-}
-
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 2f61e9edb1c1..e871e022c129 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -486,10 +486,6 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
-bool amdgpu_is_uvd_state(u32 class, u32 class2);
-void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
- u32 *p, u32 *u);
-int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th);
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);
@@ -505,11 +501,6 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
enum amdgpu_pcie_gen asic_gen,
enum amdgpu_pcie_gen default_gen);
-u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
- u16 asic_lanes,
- u16 default_lanes);
-u8 amdgpu_encode_pci_lane_width(u32 lanes);
-
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7f3aa7b7e1d8..7419ea8a388b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -73,9 +73,10 @@
* - 3.27.0 - Add new chunk to AMDGPU_CS to enable BO_LIST creation.
* - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
* - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
+ * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 29
+#define KMS_DRIVER_MINOR 30
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -1178,6 +1179,22 @@ static const struct file_operations amdgpu_driver_kms_fops = {
#endif
};
+int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
+{
+ struct drm_file *file;
+
+ if (!filp)
+ return -EINVAL;
+
+ if (filp->f_op != &amdgpu_driver_kms_fops) {
+ return -EINVAL;
+ }
+
+ file = filp->private_data;
+ *fpriv = file->driver_priv;
+ return 0;
+}
+
static bool
amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
bool in_vblank_irq, int *vpos, int *hpos,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index d0a5db777b6d..1c50be3ab8a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -140,9 +140,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
* Interrupt handler (VI), walk the IH ring.
* Returns irq process return code.
*/
-int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
- void (*callback)(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih))
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
u32 wptr;
@@ -162,7 +160,7 @@ restart_ih:
rmb();
while (ih->rptr != wptr) {
- callback(adev, ih);
+ amdgpu_irq_dispatch(adev, ih);
ih->rptr &= ih->ptr_mask;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 1ccb1831382a..113a1ba13d4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -69,8 +69,6 @@ struct amdgpu_ih_funcs {
int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
unsigned ring_size, bool use_bus_addr);
void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
-int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
- void (*callback)(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih));
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 8bfb3dab46f7..af4c3b1af322 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -131,29 +131,6 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_callback - callback from the IH ring
- *
- * @adev: amdgpu device pointer
- * @ih: amdgpu ih ring
- *
- * Callback from IH ring processing to handle the entry at the current position
- * and advance the read pointer.
- */
-static void amdgpu_irq_callback(struct amdgpu_device *adev,
- struct amdgpu_ih_ring *ih)
-{
- u32 ring_index = ih->rptr >> 2;
- struct amdgpu_iv_entry entry;
-
- entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
- amdgpu_ih_decode_iv(adev, &entry);
-
- trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
-
- amdgpu_irq_dispatch(adev, &entry);
-}
-
-/**
* amdgpu_irq_handler - IRQ handler
*
* @irq: IRQ number (unused)
@@ -170,7 +147,7 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
struct amdgpu_device *adev = dev->dev_private;
irqreturn_t ret;
- ret = amdgpu_ih_process(adev, &adev->irq.ih, amdgpu_irq_callback);
+ ret = amdgpu_ih_process(adev, &adev->irq.ih);
if (ret == IRQ_HANDLED)
pm_runtime_mark_last_busy(dev->dev);
return ret;
@@ -188,7 +165,7 @@ static void amdgpu_irq_handle_ih1(struct work_struct *work)
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
irq.ih1_work);
- amdgpu_ih_process(adev, &adev->irq.ih1, amdgpu_irq_callback);
+ amdgpu_ih_process(adev, &adev->irq.ih1);
}
/**
@@ -203,7 +180,7 @@ static void amdgpu_irq_handle_ih2(struct work_struct *work)
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
irq.ih2_work);
- amdgpu_ih_process(adev, &adev->irq.ih2, amdgpu_irq_callback);
+ amdgpu_ih_process(adev, &adev->irq.ih2);
}
/**
@@ -394,14 +371,23 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
* Dispatches IRQ to IP blocks.
*/
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry)
+ struct amdgpu_ih_ring *ih)
{
- unsigned client_id = entry->client_id;
- unsigned src_id = entry->src_id;
+ u32 ring_index = ih->rptr >> 2;
+ struct amdgpu_iv_entry entry;
+ unsigned client_id, src_id;
struct amdgpu_irq_src *src;
bool handled = false;
int r;
+ entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
+ amdgpu_ih_decode_iv(adev, &entry);
+
+ trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
+
+ client_id = entry.client_id;
+ src_id = entry.src_id;
+
if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
@@ -416,7 +402,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
client_id, src_id);
} else if ((src = adev->irq.client[client_id].sources[src_id])) {
- r = src->funcs->process(adev, src, entry);
+ r = src->funcs->process(adev, src, &entry);
if (r < 0)
DRM_ERROR("error processing interrupt (%d)\n", r);
else if (r)
@@ -428,7 +414,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
/* Send it to amdkfd as well if it isn't already handled */
if (!handled)
- amdgpu_amdkfd_interrupt(adev, entry->iv_entry);
+ amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index c27decfda494..c718e94a55c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -108,7 +108,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
unsigned client_id, unsigned src_id,
struct amdgpu_irq_src *source);
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry);
+ struct amdgpu_ih_ring *ih);
int amdgpu_irq_update(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index bc62bf41b7e9..e860412043bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -207,11 +207,12 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
if (!r) {
acpi_status = amdgpu_acpi_init(adev);
if (acpi_status)
- dev_dbg(&dev->pdev->dev,
+ dev_dbg(&dev->pdev->dev,
"Error during ACPI methods call\n");
}
if (amdgpu_device_is_px(dev)) {
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 698fd8a2f775..889e443eeee7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -406,6 +406,7 @@ struct amdgpu_crtc {
struct amdgpu_flip_work *pflip_works;
enum amdgpu_flip_status pflip_status;
int deferred_flip_completion;
+ u64 last_flip_vblank;
/* pll sharing */
struct amdgpu_atom_ss ss;
bool ss_enabled;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index fd9c4beeaaa4..ec9e45004bff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1285,6 +1285,30 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
}
/**
+ * amdgpu_bo_sync_wait - Wait for BO reservation fences
+ *
+ * @bo: buffer object
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct amdgpu_sync sync;
+ int r;
+
+ amdgpu_sync_create(&sync);
+ amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner, false);
+ r = amdgpu_sync_wait(&sync, intr);
+ amdgpu_sync_free(&sync);
+
+ return r;
+}
+
+/**
* amdgpu_bo_gpu_offset - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
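
For context, a hedged sketch of how the amdgpu_amdkfd_gpuvm.c hunks above use this new helper in place of the removed eviction-fence remove/re-add dance: waiting as AMDGPU_FENCE_OWNER_KFD skips the KFD eviction fence, so the wait itself cannot trigger an eviction.

	/* Sketch only; see the real callers in amdgpu_amdkfd_gpuvm.c */
	r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
	if (r)
		return r;
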
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 9291c2f837e9..220a6a7b1bc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -266,6 +266,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 1cafe8d83a4d..0767a93e4d91 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -54,16 +54,20 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
enum drm_sched_priority priority)
{
struct file *filp = fget(fd);
- struct drm_file *file;
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx *ctx;
uint32_t id;
+ int r;
if (!filp)
return -EINVAL;
- file = filp->private_data;
- fpriv = file->driver_priv;
+ r = amdgpu_file_to_fpriv(filp, &fpriv);
+ if (r) {
+ fput(filp);
+ return r;
+ }
+
idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
amdgpu_ctx_priority_override(ctx, priority);
@@ -72,6 +76,39 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev,
+ int fd,
+ unsigned ctx_id,
+ enum drm_sched_priority priority)
+{
+ struct file *filp = fget(fd);
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_ctx *ctx;
+ int r;
+
+ if (!filp)
+ return -EINVAL;
+
+ r = amdgpu_file_to_fpriv(filp, &fpriv);
+ if (r) {
+ fput(filp);
+ return r;
+ }
+
+ ctx = amdgpu_ctx_get(fpriv, ctx_id);
+
+ if (!ctx) {
+ fput(filp);
+ return -EINVAL;
+ }
+
+ amdgpu_ctx_priority_override(ctx, priority);
+ amdgpu_ctx_put(ctx);
+ fput(filp);
+
+ return 0;
+}
+
int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -81,7 +118,7 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
int r;
priority = amdgpu_to_sched_priority(args->in.priority);
- if (args->in.flags || priority == DRM_SCHED_PRIORITY_INVALID)
+ if (priority == DRM_SCHED_PRIORITY_INVALID)
return -EINVAL;
switch (args->in.op) {
@@ -90,6 +127,12 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
args->in.fd,
priority);
break;
+ case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE:
+ r = amdgpu_sched_context_priority_override(adev,
+ args->in.fd,
+ args->in.ctx_id,
+ priority);
+ break;
default:
DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
r = -EINVAL;
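
For reference, a hedged userspace sketch of the new op; the struct and field names assume the matching amdgpu_drm.h uapi update, which is not part of this hunk:

	union drm_amdgpu_sched args = {0};

	args.in.op = AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE;
	args.in.fd = target_drm_fd;	/* fd of the DRM file owning the context */
	args.in.ctx_id = target_ctx_id;	/* override just this one context */
	args.in.priority = AMDGPU_CTX_PRIORITY_HIGH;
	drmIoctl(fd, DRM_IOCTL_AMDGPU_SCHED, &args);
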
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 7cd2336e29ff..bfa9062ce6b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -652,12 +652,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct ttm_bo_global *glob = adev->mman.bdev.glob;
struct amdgpu_vm_bo_base *bo_base;
+#if 0
if (vm->bulk_moveable) {
spin_lock(&glob->lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
spin_unlock(&glob->lru_lock);
return;
}
+#endif
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
@@ -698,8 +700,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_vm_bo_base *bo_base, *tmp;
int r = 0;
- vm->bulk_moveable &= list_empty(&vm->evicted);
-
list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
@@ -828,7 +828,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
WARN_ON(job->ibs[0].length_dw > 64);
r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
- AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ AMDGPU_FENCE_OWNER_KFD, false);
if (r)
goto error_free;
@@ -947,10 +947,6 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
if (r)
return r;
- r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
- if (r)
- goto error_free_pt;
-
if (vm->use_cpu_for_update) {
r = amdgpu_bo_kmap(pt, NULL);
if (r)
@@ -963,6 +959,10 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+
+ r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
+ if (r)
+ goto error_free_pt;
}
return 0;
@@ -1332,31 +1332,6 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
}
}
-
-/**
- * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
- *
- * @adev: amdgpu_device pointer
- * @vm: related vm
- * @owner: fence owner
- *
- * Returns:
- * 0 on success, errno otherwise.
- */
-static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- void *owner)
-{
- struct amdgpu_sync sync;
- int r;
-
- amdgpu_sync_create(&sync);
- amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false);
- r = amdgpu_sync_wait(&sync, true);
- amdgpu_sync_free(&sync);
-
- return r;
-}
-
/**
* amdgpu_vm_update_func - helper to call update function
*
@@ -1451,7 +1426,8 @@ restart:
params.adev = adev;
if (vm->use_cpu_for_update) {
- r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
+ r = amdgpu_bo_sync_wait(vm->root.base.bo,
+ AMDGPU_FENCE_OWNER_VM, true);
if (unlikely(r))
return r;
@@ -1772,9 +1748,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
params.adev = adev;
params.vm = vm;
- /* sync to everything on unmapping */
+ /* sync to everything except eviction fences on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
- owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+ owner = AMDGPU_FENCE_OWNER_KFD;
if (vm->use_cpu_for_update) {
/* params.src is used as flag to indicate system Memory */
@@ -1784,7 +1760,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* Wait for PT BOs to be idle. PTs share the same resv. object
* as the root PD BO
*/
- r = amdgpu_vm_wait_pd(adev, vm, owner);
+ r = amdgpu_bo_sync_wait(vm->root.base.bo, owner, true);
if (unlikely(r))
return r;
@@ -3057,13 +3033,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
goto error_unreserve;
+ amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
+
r = amdgpu_vm_clear_bo(adev, vm, root,
adev->vm_manager.root_level,
vm->pte_support_ats);
if (r)
goto error_unreserve;
- amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
amdgpu_bo_unreserve(vm->root.base.bo);
if (pasid) {
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index db443ec53d3a..bea32f076b91 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2980,7 +2980,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- unsigned long flags;
+ unsigned long flags;
unsigned crtc_id;
struct amdgpu_crtc *amdgpu_crtc;
struct amdgpu_flip_work *works;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 5533f6e4f4a4..d0309e8c9d12 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -220,6 +220,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index b11a1c17a7f2..73851ebb3833 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -266,7 +266,8 @@ flr_done:
}
/* Trigger recovery for world switch failure if no TDR */
- if (amdgpu_device_should_recover_gpu(adev))
+ if (amdgpu_device_should_recover_gpu(adev)
+ && amdgpu_lockup_timeout == MAX_SCHEDULE_TIMEOUT)
amdgpu_device_gpu_recover(adev, NULL);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 221f26e50322..c69d51598cfe 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -32,7 +32,7 @@
static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
- u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
+ u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index c63de945c021..0487e3a4e9e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -500,9 +500,7 @@ static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
uint32_t reg;
- reg = smnMP1_FIRMWARE_FLAGS | 0x03b00000;
- WREG32_SOC15(NBIO, 0, mmPCIE_INDEX2, reg);
- reg = RREG32_SOC15(NBIO, 0, mmPCIE_DATA2);
+ reg = RREG32_PCIE(smnMP1_FIRMWARE_FLAGS | 0x03b00000);
return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 127b85983e8f..c816e55d43a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -128,7 +128,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = {
static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
{
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
@@ -158,7 +158,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
};
static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 79c1a9bbcc21..9d8df68893b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1436,7 +1436,7 @@ static int si_common_early_init(void *handle)
AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
- adev->pg_flags = 0;
+ adev->pg_flags = 0;
adev->external_rev_id = (adev->rev_id == 0) ? 1 :
(adev->rev_id == 1) ? 5 : 6;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index da58040fdbdc..41e01a7f57a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6216,10 +6216,12 @@ static void si_request_link_speed_change_before_state_change(struct amdgpu_devic
si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
if (current_link_speed == AMDGPU_PCIE_GEN2)
break;
+ /* fall through */
case AMDGPU_PCIE_GEN2:
if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
break;
#endif
+ /* fall through */
default:
si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 99ebcf29dcb0..ed89a101f73f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -461,7 +461,6 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
- case CHIP_VEGA20:
soc15_asic_get_baco_capability(adev, &baco_reset);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index 4b5d60ea3e78..a8e92638a2e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -81,6 +81,10 @@ void vega10_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_DOORBELL64_VCE_RING2_3;
adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_DOORBELL64_VCE_RING4_5;
adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7;
+
+ adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL64_FIRST_NON_CP;
+ adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL64_LAST_NON_CP;
+
/* In unit of dword doorbell */
adev->doorbell_index.max_assignment = AMDGPU_DOORBELL64_MAX_ASSIGNMENT << 1;
adev->doorbell_index.sdma_doorbell_range = 4;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index 53716c593b2b..0db84386252a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -85,6 +85,10 @@ void vega20_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3;
adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5;
adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7;
+
+ adev->doorbell_index.first_non_cp = AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP;
+ adev->doorbell_index.last_non_cp = AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP;
+
adev->doorbell_index.max_assignment = AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT << 1;
adev->doorbell_index.sdma_doorbell_range = 20;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 8372556b52eb..c6c9530e704e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -134,12 +134,18 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
*/
q->doorbell_id = q->properties.queue_id;
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- /* For SDMA queues on SOC15, use static doorbell
- * assignments based on the engine and queue.
+ /* For SDMA queues on SOC15 with 8-byte doorbell, use static
+ * doorbell assignments based on the engine and queue id.
+ * The doorbell index distance between RLC (2*i) and (2*i+1)
+ * for an SDMA engine is 512.
*/
- q->doorbell_id = dev->shared_resources.sdma_doorbell
- [q->properties.sdma_engine_id]
- [q->properties.sdma_queue_id];
+ uint32_t *idx_offset =
+ dev->shared_resources.sdma_doorbell_idx;
+
+ q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
+ + (q->properties.sdma_queue_id & 1)
+ * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
+ + (q->properties.sdma_queue_id >> 1);
} else {
/* For CP queues on SOC15 reserve a free doorbell ID */
unsigned int found;
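
A worked example of the new assignment, using a hypothetical idx_offset[sdma_engine_id] of 0x10 and the KFD_QUEUE_DOORBELL_MIRROR_OFFSET of 512 (0x200) defined in the kfd_priv.h hunk below:

	/*
	 * sdma_queue_id 0: 0x10 + 0 * 512 + 0 = 0x010  (RLC 2*i)
	 * sdma_queue_id 1: 0x10 + 1 * 512 + 0 = 0x210  (RLC 2*i+1, one page away)
	 * sdma_queue_id 2: 0x10 + 0 * 512 + 1 = 0x011
	 * sdma_queue_id 3: 0x10 + 1 * 512 + 1 = 0x211
	 */
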
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 47243165a082..ae90a99909ef 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -323,57 +323,7 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
- uint64_t addr;
- struct cik_mqd *m;
- int retval;
-
- retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
- mqd_mem_obj);
-
- if (retval != 0)
- return -ENOMEM;
-
- m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
- addr = (*mqd_mem_obj)->gpu_addr;
-
- memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
-
- m->header = 0xC0310800;
- m->compute_pipelinestat_enable = 1;
- m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
-
- m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
- PRELOAD_REQ;
- m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
- QUANTUM_DURATION(10);
-
- m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
- m->cp_mqd_base_addr_lo = lower_32_bits(addr);
- m->cp_mqd_base_addr_hi = upper_32_bits(addr);
-
- m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
-
- /*
- * Pipe Priority
- * Identifies the pipe relative priority when this queue is connected
- * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
- * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
- * 0 = CS_LOW (typically below GFX)
- * 1 = CS_MEDIUM (typically between HP3D and GFX
- * 2 = CS_HIGH (typically above HP3D)
- */
- m->cp_hqd_pipe_priority = 1;
- m->cp_hqd_queue_priority = 15;
-
- *mqd = m;
- if (gart_addr)
- *gart_addr = addr;
- retval = mm->update_mqd(mm, m, q);
-
- return retval;
+ return init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
}
static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 12b66330fc6d..0eeee3c6d6dc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -97,17 +97,29 @@
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
+ (KFD_MAX_NUM_OF_PROCESSES * \
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
+
+#define KFD_KERNEL_QUEUE_SIZE 2048
+
+/*
+ * 512 = 0x200
+ * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
+ * same SDMA engine on SOC15, which has 8-byte doorbells for SDMA.
+ * 512 8-byte doorbell distance (i.e. one page away) ensures that SDMA RLC
+ * (2*i+1) doorbells (in terms of the lower 12-bit address) lie exactly in
+ * the OFFSET and SIZE set in registers like BIF_SDMA0_DOORBELL_RANGE.
+ */
+#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512
+
+
/*
* Kernel module parameter to specify maximum number of supported queues per
* device
*/
extern int max_num_of_queues_per_device;
-#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
- (KFD_MAX_NUM_OF_PROCESSES * \
- KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
-
-#define KFD_KERNEL_QUEUE_SIZE 2048
/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 80b36e860a0a..4bdae78bab8e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -607,13 +607,17 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
if (!qpd->doorbell_bitmap)
return -ENOMEM;
- /* Mask out any reserved doorbells */
- for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS; i++)
- if ((dev->shared_resources.reserved_doorbell_mask & i) ==
- dev->shared_resources.reserved_doorbell_val) {
+ /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
+ for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
+ if (i >= dev->shared_resources.non_cp_doorbells_start
+ && i <= dev->shared_resources.non_cp_doorbells_end) {
set_bit(i, qpd->doorbell_bitmap);
- pr_debug("reserved doorbell 0x%03x\n", i);
+ set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
+ qpd->doorbell_bitmap);
+ pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i,
+ i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
}
+ }
return 0;
}
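
To illustrate the loop above, assume non_cp_doorbells_start = 0x0e0 and non_cp_doorbells_end = 0x0ff (the range the removed reserved_doorbell_mask/val pair encoded; an assumption for this example): the bitmap then reserves 0x0e0-0x0ff plus the mirrored page 0x2e0-0x2ff, because each index i also reserves i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET (512 = 0x200).
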
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3a6f595f295e..fb27783d7a54 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -303,12 +303,11 @@ static void dm_pflip_high_irq(void *interrupt_params)
return;
}
+ /* Update to correct count(s) if racing with vblank irq */
+ amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
/* wake up userspace */
if (amdgpu_crtc->event) {
- /* Update to correct count(s) if racing with vblank irq */
- drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
-
drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
/* page flip completed. clean up */
@@ -786,12 +785,13 @@ static int dm_suspend(void *handle)
struct amdgpu_display_manager *dm = &adev->dm;
int ret = 0;
+ WARN_ON(adev->dm.cached_state);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
+
s3_handle_mst(adev->ddev, true);
amdgpu_dm_irq_suspend(adev);
- WARN_ON(adev->dm.cached_state);
- adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
@@ -886,6 +886,7 @@ static void emulated_link_detect(struct dc_link *link)
return;
}
+ /* dc_sink_create returns a new reference */
link->local_sink = sink;
edid_status = dm_helpers_read_local_edid(
@@ -952,6 +953,8 @@ static int dm_resume(void *handle)
if (aconnector->fake_enable && aconnector->dc_link->local_sink)
aconnector->fake_enable = false;
+ if (aconnector->dc_sink)
+ dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
amdgpu_dm_update_connector_after_detect(aconnector);
mutex_unlock(&aconnector->hpd_lock);
@@ -1061,6 +1064,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
sink = aconnector->dc_link->local_sink;
+ if (sink)
+ dc_sink_retain(sink);
/*
* Edid mgmt connector gets first update only in mode_valid hook and then
@@ -1085,21 +1090,24 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
* to it anymore after disconnect, so on next crtc to connector
* reshuffle by UMD we will get into unwanted dc_sink release
*/
- if (aconnector->dc_sink != aconnector->dc_em_sink)
- dc_sink_release(aconnector->dc_sink);
+ dc_sink_release(aconnector->dc_sink);
}
aconnector->dc_sink = sink;
+ dc_sink_retain(aconnector->dc_sink);
amdgpu_dm_update_freesync_caps(connector,
aconnector->edid);
} else {
amdgpu_dm_update_freesync_caps(connector, NULL);
- if (!aconnector->dc_sink)
+ if (!aconnector->dc_sink) {
aconnector->dc_sink = aconnector->dc_em_sink;
- else if (aconnector->dc_sink != aconnector->dc_em_sink)
dc_sink_retain(aconnector->dc_sink);
+ }
}
mutex_unlock(&dev->mode_config.mutex);
+
+ if (sink)
+ dc_sink_release(sink);
return;
}
@@ -1107,8 +1115,10 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
* TODO: temporary guard to look for proper fix
* if this sink is MST sink, we should not do anything
*/
- if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ dc_sink_release(sink);
return;
+ }
if (aconnector->dc_sink == sink) {
/*
@@ -1117,6 +1127,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
*/
DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
aconnector->connector_id);
+ if (sink)
+ dc_sink_release(sink);
return;
}
@@ -1138,6 +1150,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
amdgpu_dm_update_freesync_caps(connector, NULL);
aconnector->dc_sink = sink;
+ dc_sink_retain(aconnector->dc_sink);
if (sink->dc_edid.length == 0) {
aconnector->edid = NULL;
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
@@ -1158,11 +1171,15 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
amdgpu_dm_update_freesync_caps(connector, NULL);
drm_connector_update_edid_property(connector, NULL);
aconnector->num_modes = 0;
+ dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
}
mutex_unlock(&dev->mode_config.mutex);
+
+ if (sink)
+ dc_sink_release(sink);
}
static void handle_hpd_irq(void *param)
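
Taken together, the hunks above converge on one reference-counting discipline for dc_sink: retain before publishing a pointer, release whatever the holder previously pointed at, and drop the detection-time reference on every exit path. The pattern in isolation, with a hypothetical sink type standing in for dc_sink:

    #include <stdlib.h>

    struct sink {
            int refcount; /* the creating call returns it at 1 */
    };

    static void sink_retain(struct sink *s)
    {
            if (s)
                    s->refcount++;
    }

    static void sink_release(struct sink *s)
    {
            if (s && --s->refcount == 0)
                    free(s);
    }

    /* Publish new_sink into *slot: retain for the new holder first,
     * then drop the reference held through the old pointer. */
    static void sink_assign(struct sink **slot, struct sink *new_sink)
    {
            sink_retain(new_sink);
            sink_release(*slot);
            *slot = new_sink;
    }
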
@@ -2977,6 +2994,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
return stream;
} else {
sink = aconnector->dc_sink;
+ dc_sink_retain(sink);
}
stream = dc_create_stream_for_sink(sink);
@@ -3042,8 +3060,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
update_stream_signal(stream, sink);
finish:
- if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
- dc_sink_release(sink);
+ dc_sink_release(sink);
return stream;
}
@@ -3301,6 +3318,14 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
dm->backlight_dev = NULL;
}
#endif
+
+ if (aconnector->dc_em_sink)
+ dc_sink_release(aconnector->dc_em_sink);
+ aconnector->dc_em_sink = NULL;
+ if (aconnector->dc_sink)
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+
drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
@@ -3398,10 +3423,12 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
(edid->extensions + 1) * EDID_LENGTH,
&init_params);
- if (aconnector->base.force == DRM_FORCE_ON)
+ if (aconnector->base.force == DRM_FORCE_ON) {
aconnector->dc_sink = aconnector->dc_link->local_sink ?
aconnector->dc_link->local_sink :
aconnector->dc_em_sink;
+ dc_sink_retain(aconnector->dc_sink);
+ }
}
static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
@@ -3790,7 +3817,6 @@ static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
* check will succeed, and let DC implement proper check
*/
static const uint32_t rgb_formats[] = {
- DRM_FORMAT_RGB888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGBA8888,
@@ -4646,6 +4672,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct amdgpu_bo *abo;
uint64_t tiling_flags, dcc_address;
uint32_t target, target_vblank;
+ uint64_t last_flip_vblank;
+ bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
@@ -4678,10 +4706,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dc_plane_state *dc_plane;
struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
- if (plane->type == DRM_PLANE_TYPE_CURSOR) {
- handle_cursor_update(plane, old_plane_state);
+ /* Cursor plane is handled after stream updates */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
- }
if (!fb || !crtc || pcrtc != crtc)
continue;
@@ -4712,14 +4739,21 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
*/
abo = gem_to_amdgpu_bo(fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r != 0)) {
+ if (unlikely(r != 0))
DRM_ERROR("failed to reserve buffer before flip\n");
- WARN_ON(1);
- }
- /* Wait for all fences on this FB */
- WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
- MAX_SCHEDULE_TIMEOUT) < 0);
+ /*
+ * Wait for all fences on this FB. Do limited wait to avoid
+ * deadlock during GPU reset when this fence will not signal
+ * but we hold reservation lock for the BO.
+ */
+ r = reservation_object_wait_timeout_rcu(abo->tbo.resv,
+ true, false,
+ msecs_to_jiffies(5000));
+ if (unlikely(r == 0))
+			DRM_ERROR("Waiting for fences timed out.\n");
+
+
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
@@ -4799,7 +4833,31 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* hopefully eliminating dc_*_update structs in their entirety.
*/
if (flip_count) {
- target = (uint32_t)drm_crtc_vblank_count(pcrtc) + *wait_for_vblank;
+ if (!vrr_active) {
+ /* Use old throttling in non-vrr fixed refresh rate mode
+ * to keep flip scheduling based on target vblank counts
+ * working in a backwards compatible way, e.g., for
+ * clients using the GLX_OML_sync_control extension or
+ * DRI3/Present extension with defined target_msc.
+ */
+ last_flip_vblank = drm_crtc_vblank_count(pcrtc);
+		} else {
+ /* For variable refresh rate mode only:
+ * Get vblank of last completed flip to avoid > 1 vrr
+ * flips per video frame by use of throttling, but allow
+ * flip programming anywhere in the possibly large
+ * variable vrr vblank interval for fine-grained flip
+ * timing control and more opportunity to avoid stutter
+ * on late submission of flips.
+ */
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ last_flip_vblank = acrtc_attach->last_flip_vblank;
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+ target = (uint32_t)last_flip_vblank + *wait_for_vblank;
+
/* Prepare wait for target vblank early - before the fence-waits */
target_vblank = target - (uint32_t)drm_crtc_vblank_count(pcrtc) +
amdgpu_get_vblank_counter_kms(pcrtc->dev, acrtc_attach->crtc_id);
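
The two branches above feed different baselines into the same throttle: the live vblank counter in fixed refresh mode, which preserves target-msc semantics for GLX_OML_sync_control and DRI3/Present clients, versus the vblank of the last completed flip in VRR mode, which caps flips at one per variable-length frame without constraining where inside the frame they are programmed. The arithmetic on its own (names illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t compute_target_vblank(bool vrr_active,
                                          uint64_t current_vblank,
                                          uint64_t last_flip_vblank,
                                          uint32_t wait_for_vblank)
    {
            uint64_t base = vrr_active ? last_flip_vblank : current_vblank;

            /* Truncation to 32 bits matches the counter width used for
             * the wait; unsigned wraparound keeps it well defined. */
            return (uint32_t)base + wait_for_vblank;
    }
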
@@ -4874,6 +4932,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
mutex_unlock(&dm->dc_lock);
}
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ handle_cursor_update(plane, old_plane_state);
+
cleanup:
kfree(flip);
kfree(full);
@@ -5799,14 +5861,13 @@ dm_determine_update_type_for_commit(struct dc *dc,
old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
num_plane = 0;
- if (!new_dm_crtc_state->stream) {
- if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
- update_type = UPDATE_TYPE_FULL;
- goto cleanup;
- }
+ if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
+ update_type = UPDATE_TYPE_FULL;
+ goto cleanup;
+ }
+ if (!new_dm_crtc_state->stream)
continue;
- }
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
new_plane_crtc = new_plane_state->crtc;
@@ -5817,6 +5878,11 @@ dm_determine_update_type_for_commit(struct dc *dc,
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
+ if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
+ update_type = UPDATE_TYPE_FULL;
+ goto cleanup;
+ }
+
if (!state->allow_modeset)
continue;
@@ -5955,6 +6021,42 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
+ /*
+ * Add all primary and overlay planes on the CRTC to the state
+ * whenever a plane is enabled to maintain correct z-ordering
+ * and to enable fast surface updates.
+ */
+ drm_for_each_crtc(crtc, dev) {
+ bool modified = false;
+
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (new_plane_state->crtc == crtc ||
+ old_plane_state->crtc == crtc) {
+ modified = true;
+ break;
+ }
+ }
+
+ if (!modified)
+ continue;
+
+ drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ new_plane_state =
+ drm_atomic_get_plane_state(state, plane);
+
+ if (IS_ERR(new_plane_state)) {
+ ret = PTR_ERR(new_plane_state);
+ goto fail;
+ }
+ }
+ }
+
	/* Remove existing planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index f51d52eb52e6..c4ea3a91f17a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -191,6 +191,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
&init_params);
dc_sink->priv = aconnector;
+ /* dc_link_add_remote_sink returns a new reference */
aconnector->dc_sink = dc_sink;
if (aconnector->dc_sink)
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index a1c56f29cfeb..fd5266a58297 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -265,6 +265,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
&& id.enum_id == obj_id.enum_id)
return &bp->object_info_tbl.v1_4->display_path[i];
}
+ /* fall through */
case OBJECT_TYPE_CONNECTOR:
case OBJECT_TYPE_GENERIC:
/* Both Generic and Connector Object ID
@@ -277,6 +278,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
&& id.enum_id == obj_id.enum_id)
return &bp->object_info_tbl.v1_4->display_path[i];
}
+ /* fall through */
default:
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 12d1842079ae..eb62d10bb65c 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1348,12 +1348,12 @@ void dcn_bw_update_from_pplib(struct dc *dc)
struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
bool res;
- kernel_fpu_begin();
-
/* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
res = dm_pp_get_clock_levels_by_type_with_voltage(
ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
+ kernel_fpu_begin();
+
if (res)
res = verify_clock_values(&fclks);
@@ -1372,9 +1372,13 @@ void dcn_bw_update_from_pplib(struct dc *dc)
} else
BREAK_TO_DEBUGGER();
+ kernel_fpu_end();
+
res = dm_pp_get_clock_levels_by_type_with_voltage(
ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
+ kernel_fpu_begin();
+
if (res)
res = verify_clock_values(&dcfclks);
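
The reshuffling above shrinks each kernel_fpu_begin()/kernel_fpu_end() section to just the floating-point math: kernel_fpu_begin() disables preemption, and the pplib queries can sleep, so they must sit outside. A stand-alone sketch of the bracketing, with stub helpers modeling the kernel calls:

    #include <stdbool.h>

    struct clocks { int num_levels; };

    static bool query_clocks(struct clocks *c)  { c->num_levels = 4; return true; } /* may sleep */
    static bool verify_clocks(struct clocks *c) { return c->num_levels > 0; }       /* FP math  */
    static void fpu_begin(void) { /* kernel: claims FPU, disables preemption */ }
    static void fpu_end(void)   { /* kernel: releases FPU, re-enables preemption */ }

    static bool update_bw_from_pplib(void)
    {
            struct clocks fclks;
            bool res = query_clocks(&fclks); /* sleeping call: outside FPU section */

            fpu_begin();
            if (res)
                    res = verify_clocks(&fclks); /* only FP work inside */
            fpu_end();

            return res;
    }
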
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 52f838442e21..c68fbd55db3c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1138,6 +1138,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
+ for (i = 0; i < context->stream_count; i++)
+ context->streams[i]->mode_changed = false;
+
dc_release_state(dc->current_state);
dc->current_state = context;
@@ -1623,13 +1626,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
stream_update->adjust->v_total_min,
stream_update->adjust->v_total_max);
- if (stream_update->periodic_vsync_config && pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
- pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, VLINE0, &stream->periodic_vsync_config);
+ if (stream_update->periodic_interrupt0 &&
+ dc->hwss.setup_periodic_interrupt)
+ dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0);
- if (stream_update->enhanced_sync_config && pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
- pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, VLINE1, &stream->enhanced_sync_config);
+ if (stream_update->periodic_interrupt1 &&
+ dc->hwss.setup_periodic_interrupt)
+ dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1);
if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
stream_update->vrr_infopacket ||
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 7f5a947ad31d..4eba3c4800b6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -794,6 +794,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock;
sink->converter_disable_audio = converter_disable_audio;
+ /* dc_sink_create returns a new reference */
link->local_sink = sink;
edid_status = dm_helpers_read_local_edid(
@@ -2037,6 +2038,9 @@ static enum dc_status enable_link(
break;
}
+ if (status == DC_OK)
+ pipe_ctx->stream->link->link_status.link_active = true;
+
return status;
}
@@ -2060,6 +2064,14 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
dp_disable_link_phy_mst(link, signal);
} else
link->link_enc->funcs->disable_output(link->link_enc, signal);
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ /* MST disable link only when no stream use the link */
+		/* For MST, disable the link only when no stream still uses it */
+ link->link_status.link_active = false;
+ } else {
+ link->link_status.link_active = false;
+ }
}
static bool dp_active_dongle_validate_timing(
@@ -2623,8 +2635,6 @@ void core_link_enable_stream(
}
}
- stream->link->link_status.link_active = true;
-
core_dc->hwss.enable_audio_stream(pipe_ctx);
/* turn off otg test pattern if enable */
@@ -2659,8 +2669,6 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
core_dc->hwss.disable_stream(pipe_ctx, option);
disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
-
- pipe_ctx->stream->link->link_status.link_active = false;
}
void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
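
With the call-site assignments removed in the last two hunks, link_status.link_active is owned entirely by enable_link()/disable_link(), and an MST link stays active until its stream allocation table drains. The condensed state logic, over a hypothetical link type:

    #include <stdbool.h>

    struct link {
            bool active;
            int  mst_stream_count; /* streams still allocated on an MST link */
    };

    static void link_mark_enabled(struct link *l, bool enable_ok)
    {
            if (enable_ok)
                    l->active = true;
    }

    static void link_mark_disabled(struct link *l, bool is_mst)
    {
            if (is_mst) {
                    if (l->mst_stream_count <= 0) /* last MST stream gone */
                            l->active = false;
            } else {
                    l->active = false;
            }
    }
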
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index a798694992b9..5657cb3a2ad3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -51,9 +51,19 @@ struct freesync_context {
bool dummy;
};
-union vline_config {
- unsigned int line_number;
- unsigned long long delta_in_ns;
+enum vertical_interrupt_ref_point {
+ START_V_UPDATE = 0,
+ START_V_SYNC,
+ INVALID_POINT
+
+	// For now, only the v_update interrupt is used.
+	// START_V_BLANK,
+	// START_V_ACTIVE
+};
+
+struct periodic_interrupt_config {
+ enum vertical_interrupt_ref_point ref_point;
+ int lines_offset;
};
@@ -106,8 +116,8 @@ struct dc_stream_state {
/* DMCU info */
unsigned int abm_level;
- union vline_config periodic_vsync_config;
- union vline_config enhanced_sync_config;
+ struct periodic_interrupt_config periodic_interrupt0;
+ struct periodic_interrupt_config periodic_interrupt1;
/* from core_stream struct */
struct dc_context *ctx;
@@ -158,8 +168,8 @@ struct dc_stream_update {
struct dc_info_packet *hdr_static_metadata;
unsigned int *abm_level;
- union vline_config *periodic_vsync_config;
- union vline_config *enhanced_sync_config;
+ struct periodic_interrupt_config *periodic_interrupt0;
+ struct periodic_interrupt_config *periodic_interrupt1;
struct dc_crtc_timing_adjust *adjust;
struct dc_info_packet *vrr_infopacket;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 01e56f1a9f34..da96229db53a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -53,6 +53,27 @@
#define MCP_DISABLE_ABM_IMMEDIATELY 255
+static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ uint32_t rampingBoundary = 0xFFFF;
+
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ 1, 80000);
+
+ /* set ramping boundary */
+ REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
+
+ /* setDMCUParam_Pipe */
+ REG_UPDATE_2(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET,
+ MASTER_COMM_CMD_REG_BYTE1, controller_id);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ return true;
+}
static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce)
{
@@ -175,7 +196,6 @@ static void dmcu_set_backlight_level(
uint32_t controller_id)
{
unsigned int backlight_8_bit = 0;
- uint32_t rampingBoundary = 0xFFFF;
uint32_t s2;
if (backlight_pwm_u16_16 & 0x10000)
@@ -185,16 +205,7 @@ static void dmcu_set_backlight_level(
// Take MSB of fractional part since backlight is not max
backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
- /* set ramping boundary */
- REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
-
- /* setDMCUParam_Pipe */
- REG_UPDATE_2(MASTER_COMM_CMD_REG,
- MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET,
- MASTER_COMM_CMD_REG_BYTE1, controller_id);
-
- /* notifyDMCUMsg */
- REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+ dce_abm_set_pipe(&abm_dce->base, controller_id);
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
@@ -309,16 +320,7 @@ static bool dce_abm_immediate_disable(struct abm *abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
- REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
- 1, 80000);
-
- /* setDMCUParam_ABMLevel */
- REG_UPDATE_2(MASTER_COMM_CMD_REG,
- MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET,
- MASTER_COMM_CMD_REG_BYTE1, MCP_DISABLE_ABM_IMMEDIATELY);
-
- /* notifyDMCUMsg */
- REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+ dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY);
abm->stored_backlight_registers.BL_PWM_CNTL =
REG_READ(BL_PWM_CNTL);
@@ -419,6 +421,7 @@ static const struct abm_funcs dce_funcs = {
.abm_init = dce_abm_init,
.set_abm_level = dce_abm_set_level,
.init_backlight = dce_abm_init_backlight,
+ .set_pipe = dce_abm_set_pipe,
.set_backlight_level_pwm = dce_abm_set_backlight_level_pwm,
.get_current_backlight = dce_abm_get_current_backlight,
.get_target_backlight = dce_abm_get_target_backlight,
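
dce_abm_set_pipe() factors out the DMCU mailbox handshake that the backlight path and the immediate-disable path used to open-code: wait for the previous command to be consumed, stage the payload, write the command bytes, then raise the interrupt bit. The handshake in the abstract, over a fake register file (the real code uses REG_WAIT/REG_WRITE/REG_UPDATE on the MASTER_COMM_* registers):

    #include <stdbool.h>
    #include <stdint.h>

    #define INTERRUPT_BIT (1u << 0)

    enum { CNTL, CMD, DATA1, NUM_REGS };
    static uint32_t regs[NUM_REGS];

    static bool dmcu_send(uint8_t byte0, uint8_t byte1, uint32_t payload)
    {
            /* waitDMCUReadyForCmd: previous command must be consumed */
            if (regs[CNTL] & INTERRUPT_BIT)
                    return false;

            regs[DATA1] = payload;                        /* e.g. ramping boundary */
            regs[CMD]   = byte0 | ((uint32_t)byte1 << 8); /* e.g. MCP_ABM_PIPE_SET, id */
            regs[CNTL] |= INTERRUPT_BIT;                  /* notify the DMCU */
            return true;
    }
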
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index bbe051736a18..6e142c2db986 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -696,6 +696,11 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
+ int patched_disp_clk = context->bw.dce.dispclk_khz;
+
+	/* TODO: W/A for dal3 linux, investigate why this works */
+ if (!clk_mgr_dce->dfs_bypass_active)
+ patched_disp_clk = patched_disp_clk * 115 / 100;
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
/* get max clock state from PPLIB */
@@ -705,9 +710,9 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
}
- if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
- context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
- clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
+ context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
+ clk_mgr->clks.dispclk_khz = patched_disp_clk;
}
dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 85686d917636..a24a2bda8656 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -479,7 +479,7 @@ static void program_grph_pixel_format(
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
sign = 1;
floating = 1;
- /* no break */
+ /* fall through */
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
grph_depth = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
index acd418515346..a6b80fdaa666 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -37,6 +37,10 @@ void dce100_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
+void dce100_optimize_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+
bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
struct dc_bios *dcb,
enum pipe_gating_control power_gating);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index e1b285ea01ac..5e4db3712eef 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1300,6 +1300,10 @@ static enum dc_status apply_single_controller_ctx_to_hw(
struct drr_params params = {0};
unsigned int event_triggers = 0;
+ if (dc->hwss.disable_stream_gating) {
+ dc->hwss.disable_stream_gating(dc, pipe_ctx);
+ }
+
if (pipe_ctx->stream_res.audio != NULL) {
struct audio_output audio_output;
@@ -1329,10 +1333,8 @@ static enum dc_status apply_single_controller_ctx_to_hw(
if (!pipe_ctx->stream->apply_seamless_boot_optimization)
dc->hwss.enable_stream_timing(pipe_ctx, context, dc);
- if (pipe_ctx->stream_res.tg->funcs->program_vupdate_interrupt)
- pipe_ctx->stream_res.tg->funcs->program_vupdate_interrupt(
- pipe_ctx->stream_res.tg,
- &stream->timing);
+ if (dc->hwss.setup_vupdate_interrupt)
+ dc->hwss.setup_vupdate_interrupt(pipe_ctx);
params.vertical_total_min = stream->adjust.v_total_min;
params.vertical_total_max = stream->adjust.v_total_max;
@@ -1521,6 +1523,14 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
struct dc_link *edp_link = get_link_for_edp(dc);
bool can_edp_fast_boot_optimize = false;
bool apply_edp_fast_boot_optimization = false;
+ bool can_apply_seamless_boot = false;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->apply_seamless_boot_optimization) {
+ can_apply_seamless_boot = true;
+ break;
+ }
+ }
if (edp_link) {
/* this seems to cause blank screens on DCE8 */
@@ -1549,7 +1559,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
}
}
- if (!apply_edp_fast_boot_optimization) {
+ if (!apply_edp_fast_boot_optimization && !can_apply_seamless_boot) {
if (edp_link_to_turnoff) {
/*turn off backlight before DP_blank and encoder powered down*/
dc->hwss.edp_backlight_control(edp_link_to_turnoff, false);
@@ -2676,6 +2686,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.set_static_screen_control = set_static_screen_control,
.reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
.enable_stream_timing = dce110_enable_stream_timing,
+ .disable_stream_gating = NULL,
+ .enable_stream_gating = NULL,
.setup_stereo = NULL,
.set_avmute = dce110_set_avmute,
.wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index a60a90e68d91..c4543178ba20 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -77,6 +77,6 @@ void dce80_hw_sequencer_construct(struct dc *dc)
dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
dc->hwss.pipe_control_lock = dce_pipe_control_lock;
dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
- dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth;
+ dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 2eca81b5cf2f..c109ace96be9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -792,9 +792,22 @@ bool dce80_validate_bandwidth(
struct dc *dc,
struct dc_state *context)
{
- /* TODO implement when needed but for now hardcode max value*/
- context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ int i;
+ bool at_least_one_pipe = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream)
+ at_least_one_pipe = true;
+ }
+
+ if (at_least_one_pipe) {
+ /* TODO implement when needed but for now hardcode max value*/
+ context->bw.dce.dispclk_khz = 681000;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ } else {
+ context->bw.dce.dispclk_khz = 0;
+ context->bw.dce.yclk_khz = 0;
+ }
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 117d9d8227f7..d1a8f1c302a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -959,9 +959,25 @@ static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
int i;
+ bool can_apply_seamless_boot = false;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->apply_seamless_boot_optimization) {
+ can_apply_seamless_boot = true;
+ break;
+ }
+ }
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+	/* The assumption is that pipe_ctx does not map irregularly to
+	 * a non-preferred front end. If pipe_ctx->stream is not NULL,
+	 * the pipe will be used, so don't disable it.
+	 */
+ if (pipe_ctx->stream != NULL)
+ continue;
if (tg->funcs->is_tg_enabled(tg))
tg->funcs->lock(tg);
@@ -975,7 +991,9 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
}
}
- dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
+ /* Cannot reset the MPC mux if seamless boot */
+ if (!can_apply_seamless_boot)
+ dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
@@ -983,6 +1001,16 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
struct dpp *dpp = dc->res_pool->dpps[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ // W/A for issue with dc_post_update_surfaces_to_stream
+ hubp->power_gated = true;
+
+	/* The assumption is that pipe_ctx does not map irregularly to
+	 * a non-preferred front end. If pipe_ctx->stream is not NULL,
+	 * the pipe will be used, so don't disable it.
+	 */
+ if (pipe_ctx->stream != NULL)
+ continue;
+
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream_res.tg = tg;
@@ -1137,11 +1165,13 @@ static void reset_hw_ctx_wrap(
struct clock_source *old_clk = pipe_ctx_old->clock_source;
reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+ if (dc->hwss.enable_stream_gating) {
+ dc->hwss.enable_stream_gating(dc, pipe_ctx);
+ }
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
}
-
}
static bool patch_address_for_sbs_tb_stereo(
@@ -2162,8 +2192,10 @@ static void dcn10_blank_pixel_data(
if (!blank) {
if (stream_res->tg->funcs->set_blank)
stream_res->tg->funcs->set_blank(stream_res->tg, blank);
- if (stream_res->abm)
+ if (stream_res->abm) {
+ stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
+ }
} else if (blank) {
if (stream_res->abm)
stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
@@ -2661,8 +2693,8 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
- pos_cpy.x -= pipe_ctx->plane_state->dst_rect.x;
- pos_cpy.y -= pipe_ctx->plane_state->dst_rect.y;
+ pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x;
+ pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y;
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
@@ -2709,6 +2741,147 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp, &opt_attr);
}
+/**
+ * apply_front_porch_workaround - TODO: is this still needed on FPGA?
+ *
+ * This is a workaround for a bug that has existed since R5xx and has not been
+ * fixed: keep the front porch at a minimum of 2 for interlaced modes or 1 for
+ * progressive.
+ */
+static void apply_front_porch_workaround(
+ struct dc_crtc_timing *timing)
+{
+ if (timing->flags.INTERLACE == 1) {
+ if (timing->v_front_porch < 2)
+ timing->v_front_porch = 2;
+ } else {
+ if (timing->v_front_porch < 1)
+ timing->v_front_porch = 1;
+ }
+}
+
+int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
+{
+ struct timing_generator *optc = pipe_ctx->stream_res.tg;
+ const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
+ struct dc_crtc_timing patched_crtc_timing;
+ int vesa_sync_start;
+ int asic_blank_end;
+ int interlace_factor;
+ int vertical_line_start;
+
+ patched_crtc_timing = *dc_crtc_timing;
+ apply_front_porch_workaround(&patched_crtc_timing);
+
+ interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
+
+ vesa_sync_start = patched_crtc_timing.v_addressable +
+ patched_crtc_timing.v_border_bottom +
+ patched_crtc_timing.v_front_porch;
+
+ asic_blank_end = (patched_crtc_timing.v_total -
+ vesa_sync_start -
+ patched_crtc_timing.v_border_top)
+ * interlace_factor;
+
+ vertical_line_start = asic_blank_end -
+ optc->dlg_otg_param.vstartup_start + 1;
+
+ return vertical_line_start;
+}
+
+static void calc_vupdate_position(
+ struct pipe_ctx *pipe_ctx,
+ uint32_t *start_line,
+ uint32_t *end_line)
+{
+ const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
+ int vline_int_offset_from_vupdate =
+ pipe_ctx->stream->periodic_interrupt0.lines_offset;
+ int vupdate_offset_from_vsync = get_vupdate_offset_from_vsync(pipe_ctx);
+ int start_position;
+
+ if (vline_int_offset_from_vupdate > 0)
+ vline_int_offset_from_vupdate--;
+ else if (vline_int_offset_from_vupdate < 0)
+ vline_int_offset_from_vupdate++;
+
+ start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
+
+ if (start_position >= 0)
+ *start_line = start_position;
+ else
+ *start_line = dc_crtc_timing->v_total + start_position - 1;
+
+ *end_line = *start_line + 2;
+
+ if (*end_line >= dc_crtc_timing->v_total)
+ *end_line = 2;
+}
+
+static void cal_vline_position(
+ struct pipe_ctx *pipe_ctx,
+ enum vline_select vline,
+ uint32_t *start_line,
+ uint32_t *end_line)
+{
+ enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
+
+ if (vline == VLINE0)
+ ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
+ else if (vline == VLINE1)
+ ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
+
+ switch (ref_point) {
+ case START_V_UPDATE:
+ calc_vupdate_position(
+ pipe_ctx,
+ start_line,
+ end_line);
+ break;
+ case START_V_SYNC:
+		// Supposed to do nothing because vsync is 0.
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
+static void dcn10_setup_periodic_interrupt(
+ struct pipe_ctx *pipe_ctx,
+ enum vline_select vline)
+{
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ if (vline == VLINE0) {
+ uint32_t start_line = 0;
+ uint32_t end_line = 0;
+
+ cal_vline_position(pipe_ctx, vline, &start_line, &end_line);
+
+ tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
+
+ } else if (vline == VLINE1) {
+ pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
+ tg,
+ pipe_ctx->stream->periodic_interrupt1.lines_offset);
+ }
+}
+
+static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
+{
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ int start_line = get_vupdate_offset_from_vsync(pipe_ctx);
+
+ if (start_line < 0) {
+ ASSERT(0);
+ start_line = 0;
+ }
+
+ if (tg->funcs->setup_vertical_interrupt2)
+ tg->funcs->setup_vertical_interrupt2(tg, start_line);
+}
+
static const struct hw_sequencer_funcs dcn10_funcs = {
.program_gamut_remap = program_gamut_remap,
.init_hw = dcn10_init_hw,
@@ -2756,7 +2929,11 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
- .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .disable_stream_gating = NULL,
+ .enable_stream_gating = NULL,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt
};
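
As a sanity check on the arithmetic in get_vupdate_offset_from_vsync() above, a worked example under assumed (not hardware-verified) 1080p-like timings:

    /* Assumed: v_total = 1125, v_addressable = 1080, v_front_porch = 4,
     * borders = 0, progressive (interlace_factor = 1), vstartup_start = 26.
     *
     *   vesa_sync_start     = 1080 + 0 + 4          = 1084
     *   asic_blank_end      = (1125 - 1084 - 0) * 1 =   41
     *   vertical_line_start = 41 - 26 + 1           =   16
     *
     * VUPDATE (vertical interrupt 2) would therefore be programmed to
     * fire 16 lines after the start of vsync for this mode.
     */
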
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index f8eea10e4c64..6d66084df55f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -81,4 +81,6 @@ struct pipe_ctx *find_top_pipe_for_stream(
struct dc_state *context,
const struct dc_stream_state *stream);
+int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 2f78a84f0dcb..0345d51e9d6f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -92,134 +92,36 @@ static void optc1_disable_stereo(struct timing_generator *optc)
OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
}
-static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing)
-{
- struct dc_crtc_timing patched_crtc_timing;
- int vesa_sync_start;
- int asic_blank_end;
- int interlace_factor;
- int vertical_line_start;
-
- patched_crtc_timing = *dc_crtc_timing;
- optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
-
- vesa_sync_start = patched_crtc_timing.h_addressable +
- patched_crtc_timing.h_border_right +
- patched_crtc_timing.h_front_porch;
-
- asic_blank_end = patched_crtc_timing.h_total -
- vesa_sync_start -
- patched_crtc_timing.h_border_left;
-
- interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
-
- vesa_sync_start = patched_crtc_timing.v_addressable +
- patched_crtc_timing.v_border_bottom +
- patched_crtc_timing.v_front_porch;
-
- asic_blank_end = (patched_crtc_timing.v_total -
- vesa_sync_start -
- patched_crtc_timing.v_border_top)
- * interlace_factor;
-
- vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
- if (vertical_line_start < 0) {
- ASSERT(0);
- vertical_line_start = 0;
- }
-
- return vertical_line_start;
-}
-
-static void calc_vline_position(
+void optc1_setup_vertical_interrupt0(
struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing,
- unsigned long long vsync_delta,
- uint32_t *start_line,
- uint32_t *end_line)
+ uint32_t start_line,
+ uint32_t end_line)
{
- unsigned long long req_delta_tens_of_usec = div64_u64((vsync_delta + 9999), 10000);
- unsigned long long pix_clk_hundreds_khz = div64_u64((dc_crtc_timing->pix_clk_100hz + 999), 1000);
- uint32_t req_delta_lines = (uint32_t) div64_u64(
- (req_delta_tens_of_usec * pix_clk_hundreds_khz + dc_crtc_timing->h_total - 1),
- dc_crtc_timing->h_total);
-
- uint32_t vsync_line = get_start_vline(optc, dc_crtc_timing);
-
- if (req_delta_lines != 0)
- req_delta_lines--;
-
- if (req_delta_lines > vsync_line)
- *start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) - 1;
- else
- *start_line = vsync_line - req_delta_lines;
-
- *end_line = *start_line + 2;
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
- if (*end_line >= dc_crtc_timing->v_total)
- *end_line = 2;
+ REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
+ OTG_VERTICAL_INTERRUPT0_LINE_START, start_line,
+ OTG_VERTICAL_INTERRUPT0_LINE_END, end_line);
}
-void optc1_program_vline_interrupt(
+void optc1_setup_vertical_interrupt1(
struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing,
- enum vline_select vline,
- const union vline_config *vline_config)
+ uint32_t start_line)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t start_line = 0;
- uint32_t end_line = 0;
-
- switch (vline) {
- case VLINE0:
- calc_vline_position(optc, dc_crtc_timing, vline_config->delta_in_ns, &start_line, &end_line);
- REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0,
- OTG_VERTICAL_INTERRUPT0_LINE_START, start_line,
- OTG_VERTICAL_INTERRUPT0_LINE_END, end_line);
- break;
- case VLINE1:
- REG_SET(OTG_VERTICAL_INTERRUPT1_POSITION, 0,
- OTG_VERTICAL_INTERRUPT1_LINE_START, vline_config->line_number);
- break;
- default:
- break;
- }
+
+ REG_SET(OTG_VERTICAL_INTERRUPT1_POSITION, 0,
+ OTG_VERTICAL_INTERRUPT1_LINE_START, start_line);
}
-void optc1_program_vupdate_interrupt(
+void optc1_setup_vertical_interrupt2(
struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing)
+ uint32_t start_line)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- int32_t vertical_line_start;
- uint32_t asic_blank_end;
- uint32_t vesa_sync_start;
- struct dc_crtc_timing patched_crtc_timing;
-
- patched_crtc_timing = *dc_crtc_timing;
- optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
-
- /* asic_h_blank_end = HsyncWidth + HbackPorch =
- * vesa. usHorizontalTotal - vesa. usHorizontalSyncStart -
- * vesa.h_left_border
- */
- vesa_sync_start = patched_crtc_timing.h_addressable +
- patched_crtc_timing.h_border_right +
- patched_crtc_timing.h_front_porch;
-
- asic_blank_end = patched_crtc_timing.h_total -
- vesa_sync_start -
- patched_crtc_timing.h_border_left;
-
- /* Use OTG_VERTICAL_INTERRUPT2 replace VUPDATE interrupt,
- * program the reg for interrupt postition.
- */
- vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
- if (vertical_line_start < 0)
- vertical_line_start = 0;
REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0,
- OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start);
+ OTG_VERTICAL_INTERRUPT2_LINE_START, start_line);
}
/**
@@ -1480,8 +1382,9 @@ bool optc1_get_crc(struct timing_generator *optc,
static const struct timing_generator_funcs dcn10_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
- .program_vline_interrupt = optc1_program_vline_interrupt,
- .program_vupdate_interrupt = optc1_program_vupdate_interrupt,
+ .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+ .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+ .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
.program_global_sync = optc1_program_global_sync,
.enable_crtc = optc1_enable_crtc,
.disable_crtc = optc1_disable_crtc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 24452f11c598..4eb9a898c237 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -483,11 +483,16 @@ void optc1_program_timing(
const struct dc_crtc_timing *dc_crtc_timing,
bool use_vbios);
-void optc1_program_vline_interrupt(
+void optc1_setup_vertical_interrupt0(
struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing,
- enum vline_select vline,
- const union vline_config *vline_config);
+ uint32_t start_line,
+ uint32_t end_line);
+void optc1_setup_vertical_interrupt1(
+ struct timing_generator *optc,
+ uint32_t start_line);
+void optc1_setup_vertical_interrupt2(
+ struct timing_generator *optc,
+ uint32_t start_line);
void optc1_program_global_sync(
struct timing_generator *optc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index abc961c0906e..86dc39a02408 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -46,6 +46,7 @@ struct abm_funcs {
void (*abm_init)(struct abm *abm);
bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
bool (*set_abm_immediate_disable)(struct abm *abm);
+ bool (*set_pipe)(struct abm *abm, unsigned int controller_id);
bool (*init_backlight)(struct abm *abm);
/* backlight_pwm_u16_16 is unsigned 32 bit,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 03ae941895f3..c25f7df7b5e3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -134,14 +134,6 @@ struct dc_crtc_timing;
struct drr_params;
-union vline_config;
-
-
-enum vline_select {
- VLINE0,
- VLINE1,
- VLINE2
-};
struct timing_generator_funcs {
bool (*validate_timing)(struct timing_generator *tg,
@@ -149,14 +141,17 @@ struct timing_generator_funcs {
void (*program_timing)(struct timing_generator *tg,
const struct dc_crtc_timing *timing,
bool use_vbios);
- void (*program_vline_interrupt)(
+ void (*setup_vertical_interrupt0)(
+ struct timing_generator *optc,
+ uint32_t start_line,
+ uint32_t end_line);
+ void (*setup_vertical_interrupt1)(
+ struct timing_generator *optc,
+ uint32_t start_line);
+ void (*setup_vertical_interrupt2)(
struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing,
- enum vline_select vline,
- const union vline_config *vline_config);
+ uint32_t start_line);
- void (*program_vupdate_interrupt)(struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing);
bool (*enable_crtc)(struct timing_generator *tg);
bool (*disable_crtc)(struct timing_generator *tg);
bool (*is_counter_moving)(struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 341b4810288c..7676f25216b1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -38,6 +38,11 @@ enum pipe_gating_control {
PIPE_GATING_CONTROL_INIT
};
+enum vline_select {
+ VLINE0,
+ VLINE1
+};
+
struct dce_hwseq_wa {
bool blnd_crtc_trigger;
bool DEGVIDCN10_253;
@@ -68,6 +73,10 @@ struct stream_resource;
struct hw_sequencer_funcs {
+ void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+ void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+
void (*init_hw)(struct dc *dc);
void (*init_pipes)(struct dc *dc, struct dc_state *context);
@@ -220,6 +229,9 @@ struct hw_sequencer_funcs {
void (*set_cursor_attribute)(struct pipe_ctx *pipe);
void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
+ void (*setup_periodic_interrupt)(struct pipe_ctx *pipe_ctx, enum vline_select vline);
+ void (*setup_vupdate_interrupt)(struct pipe_ctx *pipe_ctx);
+
};
void color_space_to_black_color(
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 4f501ddcfb8d..34d6fdcb32e2 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -131,6 +131,7 @@
#define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */
#define RAVEN_A0 0x01
#define RAVEN_B0 0x21
+#define PICASSO_A0 0x41
#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
/* DCN1_01 */
#define RAVEN2_A0 0x81
@@ -165,4 +166,6 @@
#define FAMILY_UNKNOWN 0xFF
+
+
#endif /* __DAL_ASIC_ID_H__ */
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 94a84bc57c7a..bfd27f10879e 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -724,7 +724,7 @@ static void build_vrr_infopacket_v1(enum signal_type signal,
static void build_vrr_infopacket_v2(enum signal_type signal,
const struct mod_vrr_params *vrr,
- const enum color_transfer_func *app_tf,
+ enum color_transfer_func app_tf,
struct dc_info_packet *infopacket)
{
unsigned int payload_size = 0;
@@ -732,8 +732,7 @@ static void build_vrr_infopacket_v2(enum signal_type signal,
build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
build_vrr_infopacket_data(vrr, infopacket);
- if (app_tf != NULL)
- build_vrr_infopacket_fs2_data(*app_tf, infopacket);
+ build_vrr_infopacket_fs2_data(app_tf, infopacket);
build_vrr_infopacket_checksum(&payload_size, infopacket);
@@ -757,7 +756,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
const struct dc_stream_state *stream,
const struct mod_vrr_params *vrr,
enum vrr_packet_type packet_type,
- const enum color_transfer_func *app_tf,
+ enum color_transfer_func app_tf,
struct dc_info_packet *infopacket)
{
/* SPD info packet for FreeSync
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index 4222e403b151..dcef85994c45 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -145,7 +145,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
const struct dc_stream_state *stream,
const struct mod_vrr_params *vrr,
enum vrr_packet_type packet_type,
- const enum color_transfer_func *app_tf,
+ enum color_transfer_func app_tf,
struct dc_info_packet *infopacket);
void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 3ba87b076287..038b88221c5f 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -165,18 +165,11 @@ struct iram_table_v_2_2 {
};
#pragma pack(pop)
-static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
-{
- return (uint16_t)(backlight_8bit * 0x101);
-}
-
static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
struct iram_table_v_2 *table)
{
unsigned int i;
unsigned int num_entries = NUM_BL_CURVE_SEGS;
- unsigned int query_input_8bit;
- unsigned int query_output_8bit;
unsigned int lut_index;
table->backlight_thresholds[0] = 0;
@@ -194,16 +187,13 @@ static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
* format U4.10.
*/
for (i = 1; i+1 < num_entries; i++) {
- query_input_8bit = DIV_ROUNDUP((i * 256), num_entries);
-
lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
ASSERT(lut_index < params.backlight_lut_array_size);
- query_output_8bit = params.backlight_lut_array[lut_index] >> 8;
table->backlight_thresholds[i] =
- backlight_8_to_16(query_input_8bit);
+ cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries));
table->backlight_offsets[i] =
- backlight_8_to_16(query_output_8bit);
+ cpu_to_be16(params.backlight_lut_array[lut_index]);
}
}
@@ -212,8 +202,6 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
{
unsigned int i;
unsigned int num_entries = NUM_BL_CURVE_SEGS;
- unsigned int query_input_8bit;
- unsigned int query_output_8bit;
unsigned int lut_index;
table->backlight_thresholds[0] = 0;
@@ -231,16 +219,13 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
* format U4.10.
*/
for (i = 1; i+1 < num_entries; i++) {
- query_input_8bit = DIV_ROUNDUP((i * 256), num_entries);
-
lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
ASSERT(lut_index < params.backlight_lut_array_size);
- query_output_8bit = params.backlight_lut_array[lut_index] >> 8;
table->backlight_thresholds[i] =
- backlight_8_to_16(query_input_8bit);
+ cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries));
table->backlight_offsets[i] =
- backlight_8_to_16(query_output_8bit);
+ cpu_to_be16(params.backlight_lut_array[lut_index]);
}
}
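
The new table math drops the intermediate 8-bit quantization: thresholds are scaled directly across the full 16-bit range, and both thresholds and offsets are stored big-endian for the DMCU IRAM. One entry of the transform, assuming DIV_ROUNDUP is the usual round-up division and NUM_BL_CURVE_SEGS is 16:

    #include <stdint.h>

    #define NUM_BL_CURVE_SEGS 16
    #define DIV_ROUNDUP(a, b) (((a) + (b) - 1) / (b))

    static uint16_t to_be16(uint16_t v) /* cpu_to_be16 on a little-endian host */
    {
            return (uint16_t)((v << 8) | (v >> 8));
    }

    /* i in [1, NUM_BL_CURVE_SEGS - 2]; lut holds 16-bit backlight levels. */
    static void fill_entry(unsigned int i, const uint16_t *lut,
                           unsigned int lut_size,
                           uint16_t *threshold, uint16_t *offset)
    {
            unsigned int lut_index = (lut_size - 1) * i / (NUM_BL_CURVE_SEGS - 1);

            *threshold = to_be16((uint16_t)DIV_ROUNDUP(i * 65536u, NUM_BL_CURVE_SEGS));
            *offset    = to_be16(lut[lut_index]);
    }
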
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 83d960110d23..5f3c10ebff08 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -137,20 +137,17 @@ struct kgd2kfd_shared_resources {
/* Bit n == 1 means Queue n is available for KFD */
DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);
- /* Doorbell assignments (SOC15 and later chips only). Only
+ /* SDMA doorbell assignments (SOC15 and later chips only). Only
* specific doorbells are routed to each SDMA engine. Others
* are routed to IH and VCN. They are not usable by the CP.
- *
- * Any doorbell number D that satisfies the following condition
- * is reserved: (D & reserved_doorbell_mask) == reserved_doorbell_val
- *
- * KFD currently uses 1024 (= 0x3ff) doorbells per process. If
- * doorbells 0x0e0-0x0ff and 0x2e0-0x2ff are reserved, that means
- * mask would be set to 0x1e0 and val set to 0x0e0.
*/
- unsigned int sdma_doorbell[2][8];
- unsigned int reserved_doorbell_mask;
- unsigned int reserved_doorbell_val;
+ uint32_t *sdma_doorbell_idx;
+
+	/* From SOC15 onward, the doorbell index range that is not usable
+	 * for CP queues.
+	 */
+ uint32_t non_cp_doorbells_start;
+ uint32_t non_cp_doorbells_end;
/* Base address of doorbell aperture. */
phys_addr_t doorbell_physical_address;
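
The interface change replaces the mask/value encoding (doorbell D is reserved iff (D & mask) == val) with an explicit inclusive range, which is simpler to reason about and matches how init_doorbell_bitmap() consumes it. The two membership tests side by side:

    #include <stdbool.h>
    #include <stdint.h>

    /* Old scheme: mask = 0x1e0, val = 0x0e0 reserved doorbells
     * 0x0e0-0x0ff and 0x2e0-0x2ff out of 1024 per process. */
    static bool reserved_by_mask(uint32_t d, uint32_t mask, uint32_t val)
    {
            return (d & mask) == val;
    }

    /* New scheme: one contiguous non-CP range, inclusive on both ends. */
    static bool reserved_by_range(uint32_t d, uint32_t start, uint32_t end)
    {
            return d >= start && d <= end;
    }
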
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index ce177d7f04cb..6bf48934fdc4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -277,8 +277,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
if (!skip_display_settings)
phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
- if ((hwmgr->request_dpm_level != hwmgr->dpm_level) &&
- !phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
+ if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
hwmgr->dpm_level = hwmgr->request_dpm_level;
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 4588bddf8b33..615cf2c09e54 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -489,15 +489,16 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
}
int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
- uint8_t id, uint32_t *frequency)
+ uint8_t clk_id, uint8_t syspll_id,
+ uint32_t *frequency)
{
struct amdgpu_device *adev = hwmgr->adev;
struct atom_get_smu_clock_info_parameters_v3_1 parameters;
struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
uint32_t ix;
- parameters.clk_id = id;
- parameters.syspll_id = 0;
+ parameters.clk_id = clk_id;
+ parameters.syspll_id = syspll_id;
parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
parameters.dfsdid = 0;
@@ -530,20 +531,23 @@ static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
boot_values->ulSocClk = 0;
boot_values->ulDCEFClk = 0;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, SMU11_SYSPLL0_ID, &frequency))
boot_values->ulSocClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, SMU11_SYSPLL0_ID, &frequency))
boot_values->ulDCEFClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, SMU11_SYSPLL0_ID, &frequency))
boot_values->ulEClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, SMU11_SYSPLL0_ID, &frequency))
boot_values->ulVClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, SMU11_SYSPLL0_ID, &frequency))
boot_values->ulDClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL1_0_FCLK_ID, SMU11_SYSPLL1_2_ID, &frequency))
+ boot_values->ulFClk = frequency;
}
static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
@@ -563,19 +567,19 @@ static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
boot_values->ulSocClk = 0;
boot_values->ulDCEFClk = 0;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, 0, &frequency))
boot_values->ulSocClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, 0, &frequency))
boot_values->ulDCEFClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, 0, &frequency))
boot_values->ulEClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, 0, &frequency))
boot_values->ulVClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, 0, &frequency))
boot_values->ulDClk = frequency;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index fe9e8ceef50e..b7e2651b570b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -139,6 +139,7 @@ struct pp_atomfwctrl_bios_boot_up_values {
uint32_t ulEClk;
uint32_t ulVClk;
uint32_t ulDClk;
+ uint32_t ulFClk;
uint16_t usVddc;
uint16_t usVddci;
uint16_t usMvddc;
@@ -236,7 +237,8 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_smc_dpm_parameters *param);
int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
- uint8_t id, uint32_t *frequency);
+ uint8_t clk_id, uint8_t syspll_id,
+ uint32_t *frequency);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 5273de3c5b98..0ad8fe4a6277 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -139,12 +139,10 @@ static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
static int smu10_init_dynamic_state_adjustment_rule_settings(
struct pp_hwmgr *hwmgr)
{
- uint32_t table_size =
- sizeof(struct phm_clock_voltage_dependency_table) +
- (7 * sizeof(struct phm_clock_voltage_dependency_record));
+ struct phm_clock_voltage_dependency_table *table_clk_vlt;
- struct phm_clock_voltage_dependency_table *table_clk_vlt =
- kzalloc(table_size, GFP_KERNEL);
+ table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 7),
+ GFP_KERNEL);
if (NULL == table_clk_vlt) {
pr_err("Can not allocate memory!\n");
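
struct_size(p, member, n) computes sizeof(*p) plus space for n trailing flexible-array elements, with overflow checking that the open-coded sum lacks; the smu8 hunk below makes the same conversion. The shape of the change, over a hypothetical flexible-array struct:

    #include <stdlib.h>

    struct record { unsigned int clk, v; };

    struct dep_table {
            unsigned int count;
            struct record entries[]; /* flexible array member */
    };

    static struct dep_table *alloc_table(size_t n)
    {
            /* open-coded equivalent of struct_size(table, entries, n),
             * minus the kernel macro's overflow saturation */
            return calloc(1, sizeof(struct dep_table) + n * sizeof(struct record));
    }
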
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index c8f5c00dd1e7..83d3d935f3ac 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3491,14 +3491,14 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_PM_STATUS_94, 0);
+ ixSMU_PM_STATUS_95, 0);
for (i = 0; i < 10; i++) {
- mdelay(1);
+ mdelay(500);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
tmp = cgs_read_ind_register(hwmgr->device,
CGS_IND_REG__SMC,
- ixSMU_PM_STATUS_94);
+ ixSMU_PM_STATUS_95);
if (tmp != 0)
break;
}
@@ -3681,10 +3681,12 @@ static int smu7_request_link_speed_change_before_state_change(
data->force_pcie_gen = PP_PCIEGen2;
if (current_link_speed == PP_PCIEGen2)
break;
+ /* fall through */
case PP_PCIEGen2:
if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
break;
#endif
+ /* fall through */
default:
data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
break;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index d138ddae563d..58f5589aaf12 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -1211,7 +1211,7 @@ int smu7_power_control_set_level(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.TDPAdjustment :
(-1 * hwmgr->platform_descriptor.TDPAdjustment);
- if (hwmgr->chip_id > CHIP_TONGA)
+ if (hwmgr->chip_id > CHIP_TONGA)
target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
else
target_tdp = ((100 + adjust_percent) * (int)(cac_table->usConfigurableTDP * 256)) / 100;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 553a203ac47c..019d6a206492 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -272,12 +272,10 @@ static int smu8_init_dynamic_state_adjustment_rule_settings(
struct pp_hwmgr *hwmgr,
ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
{
- uint32_t table_size =
- sizeof(struct phm_clock_voltage_dependency_table) +
- (7 * sizeof(struct phm_clock_voltage_dependency_record));
+ struct phm_clock_voltage_dependency_table *table_clk_vlt;
- struct phm_clock_voltage_dependency_table *table_clk_vlt =
- kzalloc(table_size, GFP_KERNEL);
+ table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 7),
+ GFP_KERNEL);
if (NULL == table_clk_vlt) {
pr_err("Can not allocate memory!\n");
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
index f94dab27f486..7337be5602e4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
@@ -1,3 +1,25 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
#include "amdgpu.h"
#include "soc15.h"
#include "soc15_hw_ip.h"
@@ -114,7 +136,7 @@ int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
ARRAY_SIZE(pre_baco_tbl))) {
if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco))
- return -1;
+ return -EINVAL;
if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
ARRAY_SIZE(enter_baco_tbl)))
@@ -132,5 +154,5 @@ int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
}
}
- return -1;
+ return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
index a93b1e6d1c66..f7a3ffa744b3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
@@ -20,8 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#ifndef __VEGA10_BOCO_H__
-#define __VEGA10_BOCO_H__
+#ifndef __VEGA10_BACO_H__
+#define __VEGA10_BACO_H__
#include "hwmgr.h"
#include "common_baco.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 5479125ff4f6..5c4f701939ea 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -2575,10 +2575,10 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
- SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk);
+ SMU9_SYSPLL0_SOCCLK_ID, 0, &boot_up_values.ulSocClk);
pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
- SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk);
+ SMU9_SYSPLL0_DCEFCLK_ID, 0, &boot_up_values.ulDCEFClk);
data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
@@ -4407,9 +4407,9 @@ static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
return ret;
features_to_disable =
- (features_enabled ^ new_ppfeature_masks) & features_enabled;
+ features_enabled & ~new_ppfeature_masks;
features_to_enable =
- (features_enabled ^ new_ppfeature_masks) ^ features_to_disable;
+ ~features_enabled & new_ppfeature_masks;
pr_debug("features_to_disable 0x%llx\n", features_to_disable);
pr_debug("features_to_enable 0x%llx\n", features_to_enable);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 6c8e78611c03..bdb48e94eff6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -2009,9 +2009,9 @@ static int vega12_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
return ret;
features_to_disable =
- (features_enabled ^ new_ppfeature_masks) & features_enabled;
+ features_enabled & ~new_ppfeature_masks;
features_to_enable =
- (features_enabled ^ new_ppfeature_masks) ^ features_to_disable;
+ ~features_enabled & new_ppfeature_masks;
pr_debug("features_to_disable 0x%llx\n", features_to_disable);
pr_debug("features_to_enable 0x%llx\n", features_to_enable);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
index 0d883b358df2..5e8602a79b1c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
@@ -1,3 +1,25 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
#include "amdgpu.h"
#include "soc15.h"
#include "soc15_hw_ip.h"
@@ -67,14 +89,14 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
if(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
- return -1;
+ return -EINVAL;
} else if (state == BACO_STATE_OUT) {
if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco))
- return -1;
+ return -EINVAL;
if (!soc15_baco_program_registers(hwmgr, clean_baco_tbl,
ARRAY_SIZE(clean_baco_tbl)))
- return -1;
+ return -EINVAL;
}
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
index c51988a9ed77..51c7f8392925 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
@@ -20,8 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#ifndef __VEGA20_BOCO_H__
-#define __VEGA20_BOCO_H__
+#ifndef __VEGA20_BACO_H__
+#define __VEGA20_BACO_H__
#include "hwmgr.h"
#include "common_baco.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 0769b1ec562b..9aa7bec1b5fe 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -463,9 +463,9 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state)
{
dpm_state->soft_min_level = 0x0;
- dpm_state->soft_max_level = 0xffff;
+ dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_state->hard_min_level = 0x0;
- dpm_state->hard_max_level = 0xffff;
+ dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT;
}
static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
@@ -711,8 +711,10 @@ static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!ret,
"[SetupDefaultDpmTable] failed to get fclk dpm levels!",
return ret);
- } else
- dpm_table->count = 0;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100;
+ }
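When fclk DPM is unavailable, the table now carries the VBIOS boot frequency as its single level; the divide reads as a unit conversion:

/* The VBIOS boot clocks (ulFClk et al.) are in 10 kHz units, so
 * dividing by 100 yields MHz, the unit dpm_levels[].value uses. */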
vega20_init_dpm_state(&(dpm_table->dpm_state));
/* save a copy of the default DPM table */
@@ -754,6 +756,7 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
data->vbios_boot_state.eclock = boot_up_values.ulEClk;
data->vbios_boot_state.vclock = boot_up_values.ulVClk;
data->vbios_boot_state.dclock = boot_up_values.ulDClk;
+ data->vbios_boot_state.fclock = boot_up_values.ulFClk;
data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
smum_send_msg_to_smc_with_parameter(hwmgr,
@@ -780,6 +783,8 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
int ret;
@@ -816,6 +821,10 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
"[OverridePcieParameters] Attempt to override pcie params failed!",
return ret);
+ data->pcie_parameters_override = 1;
+ data->pcie_gen_level1 = pcie_gen;
+ data->pcie_width_level1 = pcie_width;
+
return 0;
}
@@ -979,6 +988,8 @@ static int vega20_od8_set_feature_capabilities(
}
if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] =
+ data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value;
if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
@@ -2314,32 +2325,8 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
- struct vega20_hwmgr *data =
- (struct vega20_hwmgr *)(hwmgr->backend);
- uint32_t soft_min_level, soft_max_level;
int ret = 0;
- soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
- soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
- data->dpm_table.gfx_table.dpm_state.soft_min_level =
- data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
- data->dpm_table.gfx_table.dpm_state.soft_max_level =
- data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
-
- soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
- soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
- data->dpm_table.mem_table.dpm_state.soft_min_level =
- data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
- data->dpm_table.mem_table.dpm_state.soft_max_level =
- data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
-
- soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
- soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
- data->dpm_table.soc_table.dpm_state.soft_min_level =
- data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
- data->dpm_table.soc_table.dpm_state.soft_max_level =
- data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
-
ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Bootup Levels!",
@@ -2641,9 +2628,8 @@ static int vega20_get_sclks(struct pp_hwmgr *hwmgr,
struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
int i, count;
- PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled,
- "[GetSclks]: gfxclk dpm not enabled!\n",
- return -EPERM);
+ if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
+ return -1;
count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
clocks->num_levels = count;
@@ -2670,9 +2656,8 @@ static int vega20_get_memclocks(struct pp_hwmgr *hwmgr,
struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table);
int i, count;
- PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled,
- "[GetMclks]: uclk dpm not enabled!\n",
- return -EPERM);
+ if (!data->smu_features[GNLD_DPM_UCLK].enabled)
+ return -1;
count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
clocks->num_levels = data->mclk_latency_table.count = count;
@@ -2696,9 +2681,8 @@ static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr,
struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table);
int i, count;
- PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_DCEFCLK].enabled,
- "[GetDcfclocks]: dcefclk dpm not enabled!\n",
- return -EPERM);
+ if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
+ return -1;
count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
clocks->num_levels = count;
@@ -2719,9 +2703,8 @@ static int vega20_get_socclocks(struct pp_hwmgr *hwmgr,
struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table);
int i, count;
- PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_SOCCLK].enabled,
- "[GetSocclks]: socclk dpm not enabled!\n",
- return -EPERM);
+ if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
+ return -1;
count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
clocks->num_levels = count;
@@ -2799,7 +2782,6 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
data->od8_settings.od8_settings_array;
OverDriveTable_t *od_table =
&(data->smc_state_table.overdrive_table);
- struct pp_clock_levels_with_latency clocks;
int32_t input_index, input_clk, input_vol, i;
int od8_id;
int ret;
@@ -2858,11 +2840,6 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
return -EOPNOTSUPP;
}
- ret = vega20_get_memclocks(hwmgr, &clocks);
- PP_ASSERT_WITH_CODE(!ret,
- "Attempt to get memory clk levels failed!",
- return ret);
-
for (i = 0; i < size; i += 2) {
if (i + 2 > size) {
pr_info("invalid number of input parameters %d\n",
@@ -2879,11 +2856,11 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
return -EINVAL;
}
- if (input_clk < clocks.data[0].clocks_in_khz / 1000 ||
+ if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) {
pr_info("clock freq %d is not within allowed range [%d - %d]\n",
input_clk,
- clocks.data[0].clocks_in_khz / 1000,
+ od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
return -EINVAL;
}
@@ -3088,9 +3065,9 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
return ret;
features_to_disable =
- (features_enabled ^ new_ppfeature_masks) & features_enabled;
+ features_enabled & ~new_ppfeature_masks;
features_to_enable =
- (features_enabled ^ new_ppfeature_masks) ^ features_to_disable;
+ ~features_enabled & new_ppfeature_masks;
pr_debug("features_to_disable 0x%llx\n", features_to_disable);
pr_debug("features_to_enable 0x%llx\n", features_to_enable);
@@ -3128,7 +3105,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
&(data->dpm_table.fclk_table);
int i, now, size = 0;
int ret = 0;
- uint32_t gen_speed, lane_width;
+ uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
switch (type) {
case PP_SCLK:
@@ -3137,10 +3114,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get current gfx clk Failed!",
return ret);
- ret = vega20_get_sclks(hwmgr, &clocks);
- PP_ASSERT_WITH_CODE(!ret,
- "Attempt to get gfx clk levels Failed!",
- return ret);
+ if (vega20_get_sclks(hwmgr, &clocks)) {
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -3154,10 +3132,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get current mclk freq Failed!",
return ret);
- ret = vega20_get_memclocks(hwmgr, &clocks);
- PP_ASSERT_WITH_CODE(!ret,
- "Attempt to get memory clk levels Failed!",
- return ret);
+ if (vega20_get_memclocks(hwmgr, &clocks)) {
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -3171,10 +3150,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get current socclk freq Failed!",
return ret);
- ret = vega20_get_socclocks(hwmgr, &clocks);
- PP_ASSERT_WITH_CODE(!ret,
- "Attempt to get soc clk levels Failed!",
- return ret);
+ if (vega20_get_socclocks(hwmgr, &clocks)) {
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -3200,10 +3180,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get current dcefclk freq Failed!",
return ret);
- ret = vega20_get_dcefclocks(hwmgr, &clocks);
- PP_ASSERT_WITH_CODE(!ret,
- "Attempt to get dcefclk levels Failed!",
- return ret);
+ if (vega20_get_dcefclocks(hwmgr, &clocks)) {
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -3212,28 +3193,36 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case PP_PCIE:
- gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ current_gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
- lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ current_lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
- for (i = 0; i < NUM_LINK_LEVELS; i++)
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ if (i == 1 && data->pcie_parameters_override) {
+ gen_speed = data->pcie_gen_level1;
+ lane_width = data->pcie_width_level1;
+ } else {
+ gen_speed = pptable->PcieGenSpeed[i];
+ lane_width = pptable->PcieLaneCount[i];
+ }
size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
- (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," :
- (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," :
- (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," :
- (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "",
- (pptable->PcieLaneCount[i] == 1) ? "x1" :
- (pptable->PcieLaneCount[i] == 2) ? "x2" :
- (pptable->PcieLaneCount[i] == 3) ? "x4" :
- (pptable->PcieLaneCount[i] == 4) ? "x8" :
- (pptable->PcieLaneCount[i] == 5) ? "x12" :
- (pptable->PcieLaneCount[i] == 6) ? "x16" : "",
+ (gen_speed == 0) ? "2.5GT/s," :
+ (gen_speed == 1) ? "5.0GT/s," :
+ (gen_speed == 2) ? "8.0GT/s," :
+ (gen_speed == 3) ? "16.0GT/s," : "",
+ (lane_width == 1) ? "x1" :
+ (lane_width == 2) ? "x2" :
+ (lane_width == 3) ? "x4" :
+ (lane_width == 4) ? "x8" :
+ (lane_width == 5) ? "x12" :
+ (lane_width == 6) ? "x16" : "",
pptable->LclkFreq[i],
- (gen_speed == pptable->PcieGenSpeed[i]) &&
- (lane_width == pptable->PcieLaneCount[i]) ?
+ (current_gen_speed == gen_speed) &&
+ (current_lane_width == lane_width) ?
"*" : "");
+ }
break;
case OD_SCLK:
@@ -3288,13 +3277,8 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
}
if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
- ret = vega20_get_memclocks(hwmgr, &clocks);
- PP_ASSERT_WITH_CODE(!ret,
- "Fail to get memory clk levels!",
- return ret);
-
size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
- clocks.data[0].clocks_in_khz / 1000,
+ od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
}
@@ -3356,6 +3340,31 @@ static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
return ret;
}
+static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+ struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table);
+ int ret = 0;
+
+ if (data->smu_features[GNLD_DPM_FCLK].enabled) {
+ PP_ASSERT_WITH_CODE(dpm_table->count > 0,
+ "[SetFclkToHightestDpmLevel] Dpm table has no entry!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS,
+ "[SetFclkToHightestDpmLevel] Dpm table has too many entries!",
+ return -EINVAL);
+
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_FCLK << 16) | dpm_table->dpm_state.soft_min_level)),
+ "[SetFclkToHighestDpmLevel] Set soft min fclk failed!",
+ return ret);
+ }
+
+ return ret;
+}
+
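A note on the message encoding used above, as the SMU11 interface appears to define it:

/* (PPCLK_FCLK << 16) | freq: bits 31..16 select the clock, bits 15..0
 * carry the requested soft minimum in MHz, so e.g.
 * (PPCLK_FCLK << 16) | 1011 asks for a 1011 MHz fclk floor. */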
static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
@@ -3366,8 +3375,10 @@ static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
&data->dpm_table.mem_table);
+ if (ret)
+ return ret;
- return ret;
+ return vega20_set_fclk_to_highest_dpm_level(hwmgr);
}
static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
@@ -3456,14 +3467,14 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
!hwmgr->display_config->multi_monitor_in_sync) ||
vblank_too_short;
- latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
+ latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
/* gfxclk */
dpm_table = &(data->dpm_table.gfx_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
@@ -3485,9 +3496,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
/* memclk */
dpm_table = &(data->dpm_table.mem_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
@@ -3526,12 +3537,21 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
if (hwmgr->display_config->nb_pstate_switch_disable)
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ /* fclk */
+ dpm_table = &(data->dpm_table.fclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
+ if (hwmgr->display_config->nb_pstate_switch_disable)
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
/* vclk */
dpm_table = &(data->dpm_table.vclk_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
@@ -3548,9 +3568,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
/* dclk */
dpm_table = &(data->dpm_table.dclk_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
@@ -3567,9 +3587,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
/* socclk */
dpm_table = &(data->dpm_table.soc_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
@@ -3586,9 +3606,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
/* eclk */
dpm_table = &(data->dpm_table.eclk_table);
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
- dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index 37f5f5e657da..a5bc758ae097 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -42,6 +42,8 @@
#define AVFS_CURVE 0
#define OD8_HOTCURVE_TEMPERATURE 85
+#define VG20_CLOCK_MAX_DEFAULT 0xFFFF
+
typedef uint32_t PP_Clock;
enum {
@@ -219,6 +221,7 @@ struct vega20_vbios_boot_state {
uint32_t eclock;
uint32_t dclock;
uint32_t vclock;
+ uint32_t fclock;
};
#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
@@ -523,6 +526,10 @@ struct vega20_hwmgr {
unsigned long metrics_time;
SmuMetrics_t metrics_table;
+
+ bool pcie_parameters_override;
+ uint32_t pcie_gen_level1;
+ uint32_t pcie_width_level1;
};
#define VEGA20_DPM2_NEAR_TDP_DEC 10
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
index 97f8a1a970c3..7a7f15d0c53a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
@@ -32,6 +32,8 @@
#include "cgs_common.h"
#include "vega20_pptable.h"
+#define VEGA20_FAN_TARGET_TEMPERATURE_OVERRIDE 105
+
static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
enum phm_platform_caps cap)
{
@@ -798,6 +800,17 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
return 0;
}
+static int override_powerplay_table_fantargettemperature(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
+ PPTable_t *ppsmc_pptable = (PPTable_t *)(pptable_information->smc_pptable);
+
+ ppsmc_pptable->FanTargetTemperature = VEGA20_FAN_TARGET_TEMPERATURE_OVERRIDE;
+
+ return 0;
+}
+
#define VEGA20_ENGINECLOCK_HARDMAX 198000
static int init_powerplay_table_information(
struct pp_hwmgr *hwmgr,
@@ -887,6 +900,10 @@ static int init_powerplay_table_information(
result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
+ if (result)
+ return result;
+
+ result = override_powerplay_table_fantargettemperature(hwmgr);
return result;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 52abca065764..2d4cfe14f72e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -2330,6 +2330,7 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
case DRAM_LOG_BUFF_SIZE:
return offsetof(SMU74_SoftRegisters, DRAM_LOG_BUFF_SIZE);
}
+ break;
case SMU_Discrete_DpmTable:
switch (member) {
case UvdBootLevel:
@@ -2339,6 +2340,7 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
case LowSclkInterruptThreshold:
return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
}
+ break;
}
pr_warn("can't get the offset of type %x member %x\n", type, member);
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
index 079fc8e8f709..742b3dc1f6cb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
@@ -40,10 +40,8 @@ bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
uint32_t mp1_fw_flags;
- WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
- (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
-
- mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
return true;
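RREG32_PCIE() (used again in the vega20 variant below) wraps the indirect access that was previously open-coded; roughly:

/* Sketch of what the helper does internally on soc15 parts: write the
 * SMN address to mmPCIE_INDEX2, read the value from mmPCIE_DATA2, all
 * under the device's pcie_idx_lock so concurrent indirect accesses
 * cannot interleave. */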
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index a6edd5df33b0..4240aeec9000 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -29,6 +29,10 @@
#include <drm/amdgpu_drm.h>
#include "smumgr.h"
+MODULE_FIRMWARE("amdgpu/bonaire_smc.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_smc.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin");
MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index b7ff7d4d6f44..ba00744c3413 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -49,10 +49,8 @@ static bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
uint32_t mp1_fw_flags;
- WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2,
- (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
-
- mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index cb55bdc36f3f..6b6e037258c3 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -145,6 +145,10 @@ static int bochs_pci_probe(struct pci_dev *pdev,
if (IS_ERR(dev))
return PTR_ERR(dev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_free_dev;
+
dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index cf3f0caf9c63..ed7af7518b52 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -614,7 +614,6 @@ static int snd_dw_hdmi_suspend(struct device *dev)
struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
snd_power_change_state(dw->card, SNDRV_CTL_POWER_D3cold);
- snd_pcm_suspend_all(dw->pcm);
return 0;
}
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index c53ecbd9abdd..40ac19848034 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1608,6 +1608,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
old_plane_state->crtc != new_plane_state->crtc)
return -EINVAL;
+ /*
+ * FIXME: Since prepare_fb and cleanup_fb are always called on
+ * the new_plane_state for async updates, we need to block framebuffer
+ * changes. This prevents use of an fb that's been cleaned up and
+ * double cleanups from occurring.
+ */
+ if (old_plane_state->fb != new_plane_state->fb)
+ return -EINVAL;
+
funcs = plane->helper_private;
if (!funcs->atomic_async_update)
return -EINVAL;
@@ -3030,9 +3039,31 @@ commit:
return 0;
}
-static int __drm_atomic_helper_disable_all(struct drm_device *dev,
- struct drm_modeset_acquire_ctx *ctx,
- bool clean_old_fbs)
+/**
+ * drm_atomic_helper_disable_all - disable all currently active outputs
+ * @dev: DRM device
+ * @ctx: lock acquisition context
+ *
+ * Loops through all connectors, finding those that aren't turned off and then
+ * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
+ * that they are connected to.
+ *
+ * This is used for example in suspend/resume to disable all currently active
+ * functions when suspending. If you just want to shut down everything at e.g.
+ * driver unload, look at drm_atomic_helper_shutdown().
+ *
+ * Note that if callers haven't already acquired all modeset locks this might
+ * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
+ * drm_atomic_helper_shutdown().
+ */
+int drm_atomic_helper_disable_all(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_atomic_state *state;
struct drm_connector_state *conn_state;
@@ -3090,35 +3121,6 @@ free:
drm_atomic_state_put(state);
return ret;
}
-
-/**
- * drm_atomic_helper_disable_all - disable all currently active outputs
- * @dev: DRM device
- * @ctx: lock acquisition context
- *
- * Loops through all connectors, finding those that aren't turned off and then
- * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
- * that they are connected to.
- *
- * This is used for example in suspend/resume to disable all currently active
- * functions when suspending. If you just want to shut down everything at e.g.
- * driver unload, look at drm_atomic_helper_shutdown().
- *
- * Note that if callers haven't already acquired all modeset locks this might
- * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- *
- * See also:
- * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
- * drm_atomic_helper_shutdown().
- */
-int drm_atomic_helper_disable_all(struct drm_device *dev,
- struct drm_modeset_acquire_ctx *ctx)
-{
- return __drm_atomic_helper_disable_all(dev, ctx, false);
-}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
/**
@@ -3139,7 +3141,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
- ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
+ ret = drm_atomic_helper_disable_all(dev, &ctx);
if (ret)
DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index b1838a41ad43..83a5bbca6e7e 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -262,6 +262,18 @@ void drm_file_free(struct drm_file *file)
kfree(file);
}
+static void drm_close_helper(struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
+
+ mutex_lock(&dev->filelist_mutex);
+ list_del(&file_priv->lhead);
+ mutex_unlock(&dev->filelist_mutex);
+
+ drm_file_free(file_priv);
+}
+
static int drm_setup(struct drm_device * dev)
{
int ret;
@@ -318,8 +330,10 @@ int drm_open(struct inode *inode, struct file *filp)
goto err_undo;
if (need_setup) {
retcode = drm_setup(dev);
- if (retcode)
+ if (retcode) {
+ drm_close_helper(filp);
goto err_undo;
+ }
}
return 0;
@@ -473,11 +487,7 @@ int drm_release(struct inode *inode, struct file *filp)
DRM_DEBUG("open_count = %d\n", dev->open_count);
- mutex_lock(&dev->filelist_mutex);
- list_del(&file_priv->lhead);
- mutex_unlock(&dev->filelist_mutex);
-
- drm_file_free(file_priv);
+ drm_close_helper(filp);
if (!--dev->open_count) {
drm_lastclose(dev);
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 67b1fca39aa6..0e3043e08c69 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -185,7 +185,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
m32.size = map.size;
m32.type = map.type;
m32.flags = map.flags;
- m32.handle = ptr_to_compat(map.handle);
+ m32.handle = ptr_to_compat((void __user *)map.handle);
m32.mtrr = map.mtrr;
if (copy_to_user(argp, &m32, sizeof(m32)))
return -EFAULT;
@@ -216,7 +216,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
m32.offset = map.offset;
m32.mtrr = map.mtrr;
- m32.handle = ptr_to_compat(map.handle);
+ m32.handle = ptr_to_compat((void __user *)map.handle);
if (map.handle != compat_ptr(m32.handle))
pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n",
map.handle, m32.type, m32.offset);
@@ -526,7 +526,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
if (err)
return err;
- req32.handle = ptr_to_compat(req.handle);
+ req32.handle = ptr_to_compat((void __user *)req.handle);
if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 7e6746b2d704..687943df58e1 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -508,6 +508,13 @@ int drm_version(struct drm_device *dev, void *data,
return err;
}
+static inline bool
+drm_render_driver_and_ioctl(const struct drm_device *dev, u32 flags)
+{
+ return drm_core_check_feature(dev, DRIVER_RENDER) &&
+ (flags & DRM_RENDER_ALLOW);
+}
+
/**
* drm_ioctl_permit - Check ioctl permissions against caller
*
@@ -522,14 +529,19 @@ int drm_version(struct drm_device *dev, void *data,
*/
int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
{
+ const struct drm_device *dev = file_priv->minor->dev;
+
/* ROOT_ONLY is only for CAP_SYS_ADMIN */
if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
return -EACCES;
- /* AUTH is only for authenticated or render client */
- if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
- !file_priv->authenticated))
- return -EACCES;
+ /* AUTH is only for master ... */
+ if (unlikely((flags & DRM_AUTH) && drm_is_primary_client(file_priv))) {
+ /* authenticated ones, or render capable on DRM_RENDER_ALLOW. */
+ if (!file_priv->authenticated &&
+ !drm_render_driver_and_ioctl(dev, flags))
+ return -EACCES;
+ }
/* MASTER is only for master or control clients */
if (unlikely((flags & DRM_MASTER) &&
@@ -570,7 +582,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_UNLOCKED|DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_UNLOCKED|DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index 041a77e400d4..21df44b78df3 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -2,7 +2,6 @@
config DRM_ETNAVIV
tristate "ETNAVIV (DRM support for Vivante GPU IP cores)"
depends on DRM
- depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST)
depends on MMU
select SHMEM
select SYNC_FILE
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
index acb68c698363..4d5d1a77eb2a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
@@ -15,8 +15,6 @@ struct etnaviv_perfmon_request;
struct etnaviv_cmdbuf {
/* suballocator this cmdbuf is allocated from */
struct etnaviv_cmdbuf_suballoc *suballoc;
- /* user context key, must be unique between all active users */
- struct etnaviv_file_private *ctx;
/* cmdbuf properties */
int suballoc_offset;
void *vaddr;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 3fbb4855396c..33854c94cb85 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -215,7 +215,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
mutex_lock(&obj->lock);
pages = etnaviv_gem_get_pages(obj);
mutex_unlock(&obj->lock);
- if (pages) {
+ if (!IS_ERR(pages)) {
int j;
iter.hdr->data[0] = bomap - bomap_start;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 76079c2291f8..f0abb744ef95 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -95,6 +95,7 @@ struct etnaviv_gem_submit_bo {
struct etnaviv_gem_submit {
struct drm_sched_job sched_job;
struct kref refcount;
+ struct etnaviv_file_private *ctx;
struct etnaviv_gpu *gpu;
struct dma_fence *out_fence, *in_fence;
int out_fence_id;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 0566171f8df2..f21529e635e3 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -15,7 +15,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
int npages = obj->size >> PAGE_SHIFT;
if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */
- return NULL;
+ return ERR_PTR(-EINVAL);
return drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 30875f8f2933..b2fe3446bfbc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -506,7 +506,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto err_submit_objects;
- submit->cmdbuf.ctx = file->driver_priv;
+ submit->ctx = file->driver_priv;
submit->exec_state = args->exec_state;
submit->flags = args->flags;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index f1c88d8ad5ba..f794e04be9e6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -320,8 +320,8 @@ etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
domain = &etnaviv_domain->base;
domain->dev = gpu->dev;
- domain->base = 0;
- domain->size = (u64)SZ_1G * 4;
+ domain->base = SZ_4K;
+ domain->size = (u64)SZ_1G * 4 - SZ_4K;
domain->ops = &etnaviv_iommuv2_ops;
ret = etnaviv_iommuv2_init(etnaviv_domain);
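The shifted aperture acts as a guard page; the likely intent, stated as a comment:

/* With base = SZ_4K the allocator can never hand out GPU address 0,
 * so a NULL pointer dereferenced by the GPU faults in the MMU instead
 * of silently hitting a valid mapping at the bottom of the 4 GiB
 * window. */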
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index 9980d81a26e3..4227a4006c34 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -113,7 +113,7 @@ static const struct etnaviv_pm_domain doms_3d[] = {
.name = "PE",
.profile_read = VIVS_MC_PROFILE_PE_READ,
.profile_config = VIVS_MC_PROFILE_CONFIG0,
- .nr_signals = 5,
+ .nr_signals = 4,
.signal = (const struct etnaviv_pm_signal[]) {
{
"PIXEL_COUNT_KILLED_BY_COLOR_PIPE",
@@ -435,7 +435,7 @@ int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
dom = meta->domains + signal->domain;
- if (signal->iter > dom->nr_signals)
+ if (signal->iter >= dom->nr_signals)
return -EINVAL;
sig = &dom->signal[signal->iter];
@@ -461,7 +461,7 @@ int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
dom = meta->domains + r->domain;
- if (r->signal > dom->nr_signals)
+ if (r->signal >= dom->nr_signals)
return -EINVAL;
return 0;
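Both hunks above fix the same off-by-one; a concrete case:

/* With dom->nr_signals == 4, valid indices are 0..3. The old
 * "iter > nr_signals" test let iter == 4 through and read one element
 * past dom->signal[]; ">=" rejects it. */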
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 67ae26602024..6d24fea1766b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -153,7 +153,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
mutex_lock(&submit->gpu->fence_lock);
ret = drm_sched_job_init(&submit->sched_job, sched_entity,
- submit->cmdbuf.ctx);
+ submit->ctx);
if (ret)
goto out_unlock;
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 215b6ff8aa73..db7bb5bd5add 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -163,17 +163,25 @@ int i915_active_ref(struct i915_active *ref,
struct i915_request *rq)
{
struct i915_active_request *active;
+ int err = 0;
+
+ /* Prevent reaping in case we malloc/wait while building the tree */
+ i915_active_acquire(ref);
active = active_instance(ref, timeline);
- if (IS_ERR(active))
- return PTR_ERR(active);
+ if (IS_ERR(active)) {
+ err = PTR_ERR(active);
+ goto out;
+ }
if (!i915_active_request_isset(active))
ref->count++;
__i915_active_request_set(active, rq);
GEM_BUG_ON(!ref->count);
- return 0;
+out:
+ i915_active_release(ref);
+ return err;
}
bool i915_active_acquire(struct i915_active *ref)
@@ -223,19 +231,25 @@ int i915_request_await_active_request(struct i915_request *rq,
int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
struct active_node *it, *n;
- int ret;
+ int err = 0;
- ret = i915_request_await_active_request(rq, &ref->last);
- if (ret)
- return ret;
+ /* await allocates and so we need to avoid hitting the shrinker */
+ if (i915_active_acquire(ref))
+ goto out; /* was idle */
+
+ err = i915_request_await_active_request(rq, &ref->last);
+ if (err)
+ goto out;
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- ret = i915_request_await_active_request(rq, &it->base);
- if (ret)
- return ret;
+ err = i915_request_await_active_request(rq, &it->base);
+ if (err)
+ goto out;
}
- return 0;
+out:
+ i915_active_release(ref);
+ return err;
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
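Reading the two i915_active hunks together, the pattern looks like a plain pin/unpin bracket around anything that may allocate:

/* i915_active_acquire() pins the ref so its tree cannot be reaped
 * while active_instance() or the await calls allocate (and possibly
 * enter the shrinker); the single out: label guarantees the matching
 * i915_active_release() on every exit path. */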
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6630212f2faf..9df65d386d11 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -757,39 +757,6 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
return ret;
}
-#if !defined(CONFIG_VGA_CONSOLE)
-static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
-{
- return 0;
-}
-#elif !defined(CONFIG_DUMMY_CONSOLE)
-static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
-{
- return -ENODEV;
-}
-#else
-static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
-{
- int ret = 0;
-
- DRM_INFO("Replacing VGA console driver\n");
-
- console_lock();
- if (con_is_bound(&vga_con))
- ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
- if (ret == 0) {
- ret = do_unregister_con_driver(&vga_con);
-
- /* Ignore "already unregistered". */
- if (ret == -ENODEV)
- ret = 0;
- }
- console_unlock();
-
- return ret;
-}
-#endif
-
static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
/*
@@ -1420,7 +1387,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
goto err_ggtt;
}
- ret = i915_kick_out_vgacon(dev_priv);
+ ret = vga_remove_vgacon(pdev);
if (ret) {
DRM_ERROR("failed to remove conflicting VGA console\n");
goto err_ggtt;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6728ea5c71d4..30d516e975c6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1688,7 +1688,8 @@ __vma_matches(struct vm_area_struct *vma, struct file *filp,
if (vma->vm_file != filp)
return false;
- return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
+ return vma->vm_start == addr &&
+ (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}
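A quick worked case for the PAGE_ALIGN() fix, assuming 4 KiB pages:

/* An mmap of size 4000 still backs a VMA spanning one full page, so
 * vm_end - vm_start == 4096. Comparing that against the raw size
 * wrongly failed the match; PAGE_ALIGN(4000) == 4096 compares the
 * rounded length the VMA actually has. */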
/**
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 13d70b90dd0f..b745c49a5af6 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -5,6 +5,7 @@
*/
#include <linux/irq.h>
+#include <linux/pm_runtime.h>
#include "i915_pmu.h"
#include "intel_ringbuffer.h"
#include "i915_drv.h"
@@ -483,7 +484,6 @@ static u64 get_rc6(struct drm_i915_private *i915)
* counter value.
*/
spin_lock_irqsave(&i915->pmu.lock, flags);
- spin_lock(&kdev->power.lock);
/*
* After the above branch intel_runtime_pm_get_if_in_use failed
@@ -496,16 +496,13 @@ static u64 get_rc6(struct drm_i915_private *i915)
* suspended and if not we cannot do better than report the last
* known RC6 value.
*/
- if (kdev->power.runtime_status == RPM_SUSPENDED) {
- if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
- i915->pmu.suspended_jiffies_last =
- kdev->power.suspended_jiffies;
+ if (pm_runtime_status_suspended(kdev)) {
+ val = pm_runtime_suspended_time(kdev);
- val = kdev->power.suspended_jiffies -
- i915->pmu.suspended_jiffies_last;
- val += jiffies - kdev->power.accounting_timestamp;
+ if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+ i915->pmu.suspended_time_last = val;
- val = jiffies_to_nsecs(val);
+ val -= i915->pmu.suspended_time_last;
val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
@@ -515,7 +512,6 @@ static u64 get_rc6(struct drm_i915_private *i915)
val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
}
- spin_unlock(&kdev->power.lock);
spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
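The switch to the PM-core helper also explains the dropped locking; as I understand the API:

/* pm_runtime_suspended_time() returns the accounted suspended time in
 * nanoseconds and takes kdev->power.lock internally, so the manual
 * lock section and the jiffies_to_nsecs() conversion both go away. */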
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index b3728c5f13e7..4fc4f2478301 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -97,9 +97,9 @@ struct i915_pmu {
*/
struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
/**
- * @suspended_jiffies_last: Cached suspend time from PM core.
+ * @suspended_time_last: Cached suspend time from PM core.
*/
- unsigned long suspended_jiffies_last;
+ u64 suspended_time_last;
/**
* @i915_attr: Memory block holding device attributes.
*/
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index d01683167c77..8bc042551692 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -223,8 +223,14 @@ out:
return &p->requests[idx];
}
+struct sched_cache {
+ struct list_head *priolist;
+};
+
static struct intel_engine_cs *
-sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
+sched_lock_engine(const struct i915_sched_node *node,
+ struct intel_engine_cs *locked,
+ struct sched_cache *cache)
{
struct intel_engine_cs *engine = node_to_request(node)->engine;
@@ -232,6 +238,7 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
if (engine != locked) {
spin_unlock(&locked->timeline.lock);
+ memset(cache, 0, sizeof(*cache));
spin_lock(&engine->timeline.lock);
}
@@ -253,11 +260,11 @@ static bool inflight(const struct i915_request *rq,
static void __i915_schedule(struct i915_request *rq,
const struct i915_sched_attr *attr)
{
- struct list_head *uninitialized_var(pl);
- struct intel_engine_cs *engine, *last;
+ struct intel_engine_cs *engine;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
const int prio = attr->priority;
+ struct sched_cache cache;
LIST_HEAD(dfs);
/* Needed in order to use the temporary link inside i915_dependency */
@@ -328,7 +335,7 @@ static void __i915_schedule(struct i915_request *rq,
__list_del_entry(&stack.dfs_link);
}
- last = NULL;
+ memset(&cache, 0, sizeof(cache));
engine = rq->engine;
spin_lock_irq(&engine->timeline.lock);
@@ -338,7 +345,7 @@ static void __i915_schedule(struct i915_request *rq,
INIT_LIST_HEAD(&dep->dfs_link);
- engine = sched_lock_engine(node, engine);
+ engine = sched_lock_engine(node, engine, &cache);
lockdep_assert_held(&engine->timeline.lock);
/* Recheck after acquiring the engine->timeline.lock */
@@ -347,11 +354,11 @@ static void __i915_schedule(struct i915_request *rq,
node->attr.priority = prio;
if (!list_empty(&node->link)) {
- if (last != engine) {
- pl = i915_sched_lookup_priolist(engine, prio);
- last = engine;
- }
- list_move_tail(&node->link, pl);
+ if (!cache.priolist)
+ cache.priolist =
+ i915_sched_lookup_priolist(engine,
+ prio);
+ list_move_tail(&node->link, cache.priolist);
} else {
/*
* If the request is not in the priolist queue because
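The sched_cache introduced above replaces the last-engine tracking; the invariant, as I read it:

/* cache.priolist is only valid while the current engine's
 * timeline.lock is held, so sched_lock_engine() wipes the cache each
 * time it trades one engine's lock for another's, and the priolist
 * lookup is redone lazily on first use under the new lock. */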
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 9726df37c4c4..540e20eb032c 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -123,12 +123,6 @@ static inline u64 ptr_to_u64(const void *ptr)
#include <linux/list.h>
-static inline int list_is_first(const struct list_head *list,
- const struct list_head *head)
-{
- return head->next == list;
-}
-
static inline void __list_del_many(struct list_head *head,
struct list_head *first)
{
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index de26cd0a5497..5104c6bbd66f 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -984,7 +984,9 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
int ret;
- ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
+ ret = component_add_typed(dev_priv->drm.dev,
+ &i915_audio_component_bind_ops,
+ I915_COMPONENT_AUDIO);
if (ret < 0) {
DRM_ERROR("failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index cacaa1d04d17..09ed90c0ba00 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -106,16 +106,6 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
&rq->fence.flags));
- clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-
- /*
- * We may race with direct invocation of
- * dma_fence_signal(), e.g. i915_request_retire(),
- * in which case we can skip processing it ourselves.
- */
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &rq->fence.flags))
- continue;
/*
* Queue for execution after dropping the signaling
@@ -123,6 +113,14 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
* more signalers to the same context or engine.
*/
i915_request_get(rq);
+
+ /*
+ * We may race with direct invocation of
+ * dma_fence_signal(), e.g. i915_request_retire(),
+ * so we need to acquire our reference to the request
+ * before we cancel the breadcrumb.
+ */
+ clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
list_add_tail(&rq->signal_link, &signal);
}
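Note: this breadcrumbs reordering is a lifetime fix, not a cleanup. Once I915_FENCE_FLAG_SIGNAL is cleared, a concurrent dma_fence_signal()/retire path may drop the last reference to the request, so the reference must be taken first. Reduced to its shape, reusing the request fields visible in the hunk (a sketch, not a standalone compilable unit):

    static void collect_signaled(struct i915_request *rq,
                                 struct list_head *out)
    {
        i915_request_get(rq);        /* 1: take our reference first */
        /* 2: only now is it safe to stop tracking the request... */
        clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
        /* 3: ...because retirement can no longer free it under us */
        list_add_tail(&rq->signal_link, out);
    }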
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index ca705546a0ab..14d580cdefd3 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -3568,6 +3568,13 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
{
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ intel_hdcp_enable(to_intel_connector(conn_state->connector));
+ else if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ intel_hdcp_disable(to_intel_connector(conn_state->connector));
}
static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
@@ -3962,12 +3969,7 @@ static int modeset_pipe(struct drm_crtc *crtc,
goto out;
ret = drm_atomic_commit(state);
- if (ret)
- goto out;
-
- return 0;
-
- out:
+out:
drm_atomic_state_put(state);
return ret;
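Note: intel_ddi_update_pipe() now services the connector's content-protection property on fastsets as well as full modesets. The property is tri-state: userspace writes DESIRED or UNDESIRED, and the kernel promotes DESIRED to ENABLED once HDCP authentication succeeds, which is why only two states are acted on. The same dispatch restated as a switch (an equivalent sketch of the hunk above):

    static void update_content_protection(struct drm_connector_state *conn_state,
                                          struct intel_connector *connector)
    {
        switch (conn_state->content_protection) {
        case DRM_MODE_CONTENT_PROTECTION_DESIRED:
            intel_hdcp_enable(connector);
            break;
        case DRM_MODE_CONTENT_PROTECTION_UNDESIRED:
            intel_hdcp_disable(connector);
            break;
        default:
            /* ENABLED is set only by the kernel, after authentication */
            break;
        }
    }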
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index c7c068662288..2220588e86ac 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -26,6 +26,7 @@
#define _INTEL_DISPLAY_H_
#include <drm/drm_util.h>
+#include <drm/i915_drm.h>
enum i915_gpio {
GPIOA,
@@ -150,21 +151,6 @@ enum plane_id {
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
for_each_if((__crtc)->plane_ids_mask & BIT(__p))
-enum port {
- PORT_NONE = -1,
-
- PORT_A = 0,
- PORT_B,
- PORT_C,
- PORT_D,
- PORT_E,
- PORT_F,
-
- I915_MAX_PORTS
-};
-
-#define port_name(p) ((p) + 'A')
-
/*
* Ports identifier referenced from other drivers.
* Expected to remain stable over time
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 376ffe842e26..e8f694b57b8a 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -338,8 +338,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
- unsigned long conn_configured, conn_seq, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
+ unsigned long conn_configured, conn_seq;
int i, j;
bool *save_enabled;
bool fallback = true, ret = true;
@@ -357,10 +357,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
drm_modeset_backoff(&ctx);
memcpy(save_enabled, enabled, count);
- mask = GENMASK(count - 1, 0);
+ conn_seq = GENMASK(count - 1, 0);
conn_configured = 0;
retry:
- conn_seq = conn_configured;
for (i = 0; i < count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@@ -373,7 +372,8 @@ retry:
if (conn_configured & BIT(i))
continue;
- if (conn_seq == 0 && !connector->has_tile)
+ /* First pass, only consider tiled connectors */
+ if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
continue;
if (connector->status == connector_status_connected)
@@ -477,8 +477,10 @@ retry:
conn_configured |= BIT(i);
}
- if ((conn_configured & mask) != mask && conn_configured != conn_seq)
+ if (conn_configured != conn_seq) { /* repeat until no more are found */
+ conn_seq = conn_configured;
goto retry;
+ }
/*
* If the BIOS didn't enable everything it could, fall back to have the
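Note: the intel_fbdev change folds `mask` and the retry seed into one variable. conn_seq starts as the full GENMASK, a value conn_configured can only reach once every connector is handled, so the first pass (tiled connectors only) always runs; each later pass seeds conn_seq with the previous result, and the loop exits at a fixed point, when a pass configures nothing new. The loop skeleton, with hypothetical is_tiled()/try_configure() predicates:

    #include <linux/bits.h>

    static bool is_tiled(unsigned int i)      { return i == 0; } /* hypothetical */
    static bool try_configure(unsigned int i) { return true; }   /* hypothetical */

    static void configure_all(unsigned int count)   /* assumes count >= 1 */
    {
        unsigned long done = 0, prev = GENMASK(count - 1, 0);
        unsigned int i;

        for (;;) {
            for (i = 0; i < count; i++) {
                if (done & BIT(i))
                    continue;
                /* first pass only: prev is still the full mask */
                if (prev == GENMASK(count - 1, 0) && !is_tiled(i))
                    continue;
                if (try_configure(i))
                    done |= BIT(i);
            }
            if (done == prev)       /* fixed point: pass added nothing */
                break;
            prev = done;
        }
    }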
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index d00d0bb07784..7eb58a9d1319 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -710,47 +710,45 @@ __sseu_prepare(struct drm_i915_private *i915,
unsigned int flags,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
- struct igt_spinner **spin_out)
+ struct igt_spinner **spin)
{
- int ret = 0;
-
- if (flags & (TEST_BUSY | TEST_RESET)) {
- struct igt_spinner *spin;
- struct i915_request *rq;
+ struct i915_request *rq;
+ int ret;
- spin = kzalloc(sizeof(*spin), GFP_KERNEL);
- if (!spin) {
- ret = -ENOMEM;
- goto out;
- }
+ *spin = NULL;
+ if (!(flags & (TEST_BUSY | TEST_RESET)))
+ return 0;
- ret = igt_spinner_init(spin, i915);
- if (ret)
- return ret;
+ *spin = kzalloc(sizeof(**spin), GFP_KERNEL);
+ if (!*spin)
+ return -ENOMEM;
- rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
- if (IS_ERR(rq)) {
- ret = PTR_ERR(rq);
- igt_spinner_fini(spin);
- kfree(spin);
- goto out;
- }
+ ret = igt_spinner_init(*spin, i915);
+ if (ret)
+ goto err_free;
- i915_request_add(rq);
+ rq = igt_spinner_create_request(*spin, ctx, engine, MI_NOOP);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto err_fini;
+ }
- if (!igt_wait_for_spinner(spin, rq)) {
- pr_err("%s: Spinner failed to start!\n", name);
- igt_spinner_end(spin);
- igt_spinner_fini(spin);
- kfree(spin);
- ret = -ETIMEDOUT;
- goto out;
- }
+ i915_request_add(rq);
- *spin_out = spin;
+ if (!igt_wait_for_spinner(*spin, rq)) {
+ pr_err("%s: Spinner failed to start!\n", name);
+ ret = -ETIMEDOUT;
+ goto err_end;
}
-out:
+ return 0;
+
+err_end:
+ igt_spinner_end(*spin);
+err_fini:
+ igt_spinner_fini(*spin);
+err_free:
+ kfree(fetch_and_zero(spin));
return ret;
}
@@ -897,22 +895,23 @@ __sseu_test(struct drm_i915_private *i915,
ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin);
if (ret)
- goto out;
+ goto out_context;
ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
if (ret)
- goto out;
+ goto out_spin;
ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj,
hweight32(sseu.slice_mask), spin);
-out:
+out_spin:
if (spin) {
igt_spinner_end(spin);
igt_spinner_fini(spin);
kfree(spin);
}
+out_context:
kernel_context_close(kctx);
return ret;
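Note: __sseu_prepare() is rewritten into a conventional unwind ladder: one label per acquired resource, jumped to in reverse order of acquisition, with fetch_and_zero() (an i915 helper that returns the pointer and NULLs it) guaranteeing the caller never sees a freed *spin. The shape as a self-contained sketch, with a hypothetical `struct thing` and stub init/start/fini helpers:

    #include <linux/slab.h>

    struct thing { int started; };

    static int  thing_init(struct thing *t)  { t->started = 0; return 0; }
    static int  thing_start(struct thing *t) { t->started = 1; return 0; }
    static void thing_fini(struct thing *t)  { t->started = 0; }

    static int prepare(struct thing **out)
    {
        int err;

        *out = kzalloc(sizeof(**out), GFP_KERNEL);
        if (!*out)
            return -ENOMEM;

        err = thing_init(*out);
        if (err)
            goto err_free;

        err = thing_start(*out);
        if (err)
            goto err_fini;

        return 0;

    err_fini:
        thing_fini(*out);
    err_free:
        kfree(*out);
        *out = NULL;                /* like fetch_and_zero(out) */
        return err;
    }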
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index c9e439c82241..c3c84a09e628 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -4,7 +4,7 @@ config DRM_IMX
select VIDEOMODE_HELPERS
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
- depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM)
+ depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST)
depends on IMX_IPUV3_CORE
help
enable i.MX graphics support
@@ -18,6 +18,7 @@ config DRM_IMX_PARALLEL_DISPLAY
config DRM_IMX_TVE
tristate "Support for TV and VGA displays"
depends on DRM_IMX
+ depends on COMMON_CLK
select REGMAP_MMIO
help
Choose this to enable the internal Television Encoder (TVe)
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 44da0f5d0ed9..c935cbe059a7 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -49,11 +49,7 @@ static int imx_drm_atomic_check(struct drm_device *dev,
{
int ret;
- ret = drm_atomic_helper_check_modeset(dev, state);
- if (ret)
- return ret;
-
- ret = drm_atomic_helper_check_planes(dev, state);
+ ret = drm_atomic_helper_check(dev, state);
if (ret)
return ret;
@@ -229,6 +225,7 @@ static int imx_drm_bind(struct device *dev)
drm->mode_config.funcs = &imx_drm_mode_config_funcs;
drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
drm->mode_config.allow_fb_modifiers = true;
+ drm->mode_config.normalize_zpos = true;
drm_mode_config_init(drm);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 3c62167a9251..ec3602ebbc1c 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -34,6 +34,7 @@ struct ipu_crtc {
struct ipu_dc *dc;
struct ipu_di *di;
int irq;
+ struct drm_pending_vblank_event *event;
};
static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc)
@@ -173,8 +174,31 @@ static const struct drm_crtc_funcs ipu_crtc_funcs = {
static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
{
struct ipu_crtc *ipu_crtc = dev_id;
+ struct drm_crtc *crtc = &ipu_crtc->base;
+ unsigned long flags;
+ int i;
+
+ drm_crtc_handle_vblank(crtc);
+
+ if (ipu_crtc->event) {
+ for (i = 0; i < ARRAY_SIZE(ipu_crtc->plane); i++) {
+ struct ipu_plane *plane = ipu_crtc->plane[i];
- drm_crtc_handle_vblank(&ipu_crtc->base);
+ if (!plane)
+ continue;
+
+ if (ipu_plane_atomic_update_pending(&plane->base))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ipu_crtc->plane)) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, ipu_crtc->event);
+ ipu_crtc->event = NULL;
+ drm_crtc_vblank_put(crtc);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+ }
return IRQ_HANDLED;
}
@@ -223,8 +247,10 @@ static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
{
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
+ struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+
WARN_ON(drm_crtc_vblank_get(crtc));
- drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+ ipu_crtc->event = crtc->state->event;
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
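Note: together, these two ipuv3-crtc.c hunks defer the commit-done event. atomic_flush stashes the event in ipu_crtc->event instead of arming it, and the vblank IRQ delivers it only once no plane still has a double-buffered update pending, so userspace cannot observe "done" before the hardware has latched every plane. The IRQ side, condensed with a hypothetical any_plane_update_pending() in place of the per-plane loop from the hunk:

    static irqreturn_t vblank_irq(struct ipu_crtc *ipu_crtc)
    {
        struct drm_crtc *crtc = &ipu_crtc->base;
        unsigned long flags;

        drm_crtc_handle_vblank(crtc);

        /* deliver the commit-done event only once nothing is pending */
        if (ipu_crtc->event && !any_plane_update_pending(ipu_crtc)) {
            spin_lock_irqsave(&crtc->dev->event_lock, flags);
            drm_crtc_send_vblank_event(crtc, ipu_crtc->event);
            ipu_crtc->event = NULL;
            drm_crtc_vblank_put(crtc);
            spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
        }
        return IRQ_HANDLED;
    }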
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 21e964f6ab5c..d7a727a6e3d7 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -273,6 +273,7 @@ static void ipu_plane_destroy(struct drm_plane *plane)
static void ipu_plane_state_reset(struct drm_plane *plane)
{
+ unsigned int zpos = (plane->type == DRM_PLANE_TYPE_PRIMARY) ? 0 : 1;
struct ipu_plane_state *ipu_state;
if (plane->state) {
@@ -284,8 +285,11 @@ static void ipu_plane_state_reset(struct drm_plane *plane)
ipu_state = kzalloc(sizeof(*ipu_state), GFP_KERNEL);
- if (ipu_state)
+ if (ipu_state) {
__drm_atomic_helper_plane_reset(plane, &ipu_state->base);
+ ipu_state->base.zpos = zpos;
+ ipu_state->base.normalized_zpos = zpos;
+ }
}
static struct drm_plane_state *
@@ -560,6 +564,25 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_FG)
ipu_dp_set_window_pos(ipu_plane->dp, dst->x1, dst->y1);
+ switch (ipu_plane->dp_flow) {
+ case IPU_DP_FLOW_SYNC_BG:
+ if (state->normalized_zpos == 1) {
+ ipu_dp_set_global_alpha(ipu_plane->dp,
+ !fb->format->has_alpha, 0xff,
+ true);
+ } else {
+ ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
+ }
+ break;
+ case IPU_DP_FLOW_SYNC_FG:
+ if (state->normalized_zpos == 1) {
+ ipu_dp_set_global_alpha(ipu_plane->dp,
+ !fb->format->has_alpha, 0xff,
+ false);
+ }
+ break;
+ }
+
eba = drm_plane_state_to_eba(state, 0);
/*
@@ -582,6 +605,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
+ ipu_plane->next_buf = !active;
if (ipu_plane_separate_alpha(ipu_plane)) {
active = ipu_idmac_get_current_buffer(ipu_plane->alpha_ch);
ipu_cpmem_set_buffer(ipu_plane->alpha_ch, !active,
@@ -595,34 +619,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
switch (ipu_plane->dp_flow) {
case IPU_DP_FLOW_SYNC_BG:
ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_RGB);
- ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
break;
case IPU_DP_FLOW_SYNC_FG:
ipu_dp_setup_channel(ipu_plane->dp, ics,
IPUV3_COLORSPACE_UNKNOWN);
- /* Enable local alpha on partial plane */
- switch (fb->format->format) {
- case DRM_FORMAT_ARGB1555:
- case DRM_FORMAT_ABGR1555:
- case DRM_FORMAT_RGBA5551:
- case DRM_FORMAT_BGRA5551:
- case DRM_FORMAT_ARGB4444:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_RGBA8888:
- case DRM_FORMAT_BGRA8888:
- case DRM_FORMAT_RGB565_A8:
- case DRM_FORMAT_BGR565_A8:
- case DRM_FORMAT_RGB888_A8:
- case DRM_FORMAT_BGR888_A8:
- case DRM_FORMAT_RGBX8888_A8:
- case DRM_FORMAT_BGRX8888_A8:
- ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
- break;
- default:
- ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
- break;
- }
+ break;
}
ipu_dmfc_config_wait4eot(ipu_plane->dmfc, drm_rect_width(dst));
@@ -709,6 +710,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
ipu_idmac_lock_enable(ipu_plane->ipu_ch, num_bursts);
ipu_plane_enable(ipu_plane);
+ ipu_plane->next_buf = -1;
}
static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
@@ -718,6 +720,24 @@ static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
.atomic_update = ipu_plane_atomic_update,
};
+bool ipu_plane_atomic_update_pending(struct drm_plane *plane)
+{
+ struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct ipu_plane_state *ipu_state = to_ipu_plane_state(state);
+
+ /* disabled crtcs must not block the update */
+ if (!state->crtc)
+ return false;
+
+ if (ipu_state->use_pre)
+ return ipu_prg_channel_configure_pending(ipu_plane->ipu_ch);
+ else if (ipu_plane->next_buf >= 0)
+ return ipu_idmac_get_current_buffer(ipu_plane->ipu_ch) !=
+ ipu_plane->next_buf;
+
+ return false;
+}
int ipu_planes_assign_pre(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -806,6 +826,7 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
{
struct ipu_plane *ipu_plane;
const uint64_t *modifiers = ipu_format_modifiers;
+ unsigned int zpos = (type == DRM_PLANE_TYPE_PRIMARY) ? 0 : 1;
int ret;
DRM_DEBUG_KMS("channel %d, dp flow %d, possible_crtcs=0x%x\n",
@@ -836,5 +857,10 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
+ if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG)
+ drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0, 1);
+ else
+ drm_plane_create_zpos_immutable_property(&ipu_plane->base, 0);
+
return ipu_plane;
}
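Note: ipu_plane_init() now exposes a zpos property: planes on the overlay-capable DP flows get a mutable 0..1 range (primary defaults to 0, overlay to 1), all others a fixed zpos of 0, and the core normalizes the values because normalize_zpos was enabled in imx-drm-core.c above. atomic_update then derives global versus per-pixel alpha from the normalized zpos rather than from a pixel-format switch. The two registration calls, sketched:

    #include <drm/drm_blend.h>

    static int sketch_add_zpos(struct drm_plane *plane, bool overlay_capable,
                               unsigned int zpos)
    {
        if (overlay_capable)    /* user-adjustable within [0, 1] */
            return drm_plane_create_zpos_property(plane, zpos, 0, 1);
        return drm_plane_create_zpos_immutable_property(plane, 0);
    }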
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index e563ea17a827..15e85e15d35c 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -27,6 +27,7 @@ struct ipu_plane {
int dp_flow;
bool disabling;
+ int next_buf;
};
struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
@@ -48,5 +49,6 @@ int ipu_plane_irq(struct ipu_plane *plane);
void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel);
void ipu_plane_disable_deferred(struct drm_plane *plane);
+bool ipu_plane_atomic_update_pending(struct drm_plane *plane);
#endif
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index cf549f1ed403..78c9e5a5e793 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -5,6 +5,7 @@ config DRM_MSM
depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
depends on MMU
+ depends on INTERCONNECT || !INTERCONNECT
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index ce1b3cc4bf6d..d1662a75c7ec 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
+#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
@@ -84,6 +85,9 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
int ret;
gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
@@ -106,6 +110,12 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
gmu->freq = gmu->gpu_freqs[index];
+
+ /*
+ * Eventually we will want to scale the path vote with the frequency but
+ * for now leave it at max so that the performance is nominal.
+ */
+ icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216));
}
void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
@@ -705,6 +715,8 @@ out:
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
int status, ret;
@@ -720,6 +732,9 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
if (ret)
goto out;
+ /* Set the bus quota to a reasonable value for boot */
+ icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));
+
a6xx_gmu_irq_enable(gmu);
/* Check to see if we are doing a cold or warm boot */
@@ -760,6 +775,8 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u32 val;
@@ -806,6 +823,9 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
/* Tell RPMh to power off the GPU */
a6xx_rpmh_stop(gmu);
+ /* Remove the bus vote */
+ icc_set_bw(gpu->icc_path, 0, 0);
+
clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
pm_runtime_put_sync(gmu->dev);
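Note: the GMU now votes for DDR bandwidth on the interconnect path at each power transition: a moderate fixed vote on resume, the 7216 MB/s ceiling once a GPU frequency is programmed (frequency-proportional scaling is deferred, per the comment above), and a zero vote on stop so the path can power down. The API lifecycle as a hedged sketch; note that icc_set_bw() treats a NULL path as a no-op, which is why the of_icc_get() failure is soaked up in adreno_gpu.c below:

    #include <linux/interconnect.h>

    static struct icc_path *gpu_icc_get(struct device *dev)
    {
        struct icc_path *path = of_icc_get(dev, NULL); /* first DT path */

        return IS_ERR(path) ? NULL : path;  /* optional: NULL is a no-op */
    }

    static void gpu_icc_resume(struct icc_path *path)
    {
        /* boot-time vote: average 0, peak 3072 MB/s */
        icc_set_bw(path, 0, MBps_to_icc(3072));
    }

    static void gpu_icc_stop(struct icc_path *path)
    {
        icc_set_bw(path, 0, 0);     /* drop the vote entirely */
    }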
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 2cfee1a4fe0b..27898475cdf4 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -18,6 +18,7 @@
*/
#include <linux/ascii85.h>
+#include <linux/interconnect.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
@@ -747,6 +748,11 @@ static int adreno_get_pwrlevels(struct device *dev,
DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
+ /* Check for an interconnect path for the bus */
+ gpu->icc_path = of_icc_get(dev, NULL);
+ if (IS_ERR(gpu->icc_path))
+ gpu->icc_path = NULL;
+
return 0;
}
@@ -787,10 +793,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
+ struct msm_gpu *gpu = &adreno_gpu->base;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
release_firmware(adreno_gpu->fw[i]);
+ icc_put(gpu->icc_path);
+
msm_gpu_cleanup(&adreno_gpu->base);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index d130825e2c75..b776fca571f3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -465,8 +465,6 @@ static void _dpu_crtc_setup_mixer_for_encoder(
return;
}
- mixer->encoder = enc;
-
cstate->num_mixers++;
DPU_DEBUG("setup mixer %d: lm %d\n",
i, mixer->hw_lm->idx - LM_0);
@@ -718,11 +716,8 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
* may delay and flush at an irq event (e.g. ppdone)
*/
drm_for_each_encoder_mask(encoder, crtc->dev,
- crtc->state->encoder_mask) {
- struct dpu_encoder_kickoff_params params = { 0 };
- dpu_encoder_prepare_for_kickoff(encoder, &params, async);
- }
-
+ crtc->state->encoder_mask)
+ dpu_encoder_prepare_for_kickoff(encoder, async);
if (!async) {
/* wait for frame_event_done completion */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index dbfb38a1986c..e59d62be4980 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -84,14 +84,12 @@ struct dpu_crtc_smmu_state_data {
* struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
* @hw_lm: LM HW Driver context
* @lm_ctl: CTL Path HW driver context
- * @encoder: Encoder attached to this lm & ctl
* @mixer_op_mode: mixer blending operation mode
* @flush_mask: mixer flush mask for ctl, mixer and pipe
*/
struct dpu_crtc_mixer {
struct dpu_hw_mixer *hw_lm;
struct dpu_hw_ctl *lm_ctl;
- struct drm_encoder *encoder;
u32 mixer_op_mode;
u32 flush_mask;
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 36af231bb73f..5aa3307f3f0c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -205,7 +205,7 @@ struct dpu_encoder_virt {
bool idle_pc_supported;
struct mutex rc_lock;
enum dpu_enc_rc_states rc_state;
- struct kthread_delayed_work delayed_off_work;
+ struct delayed_work delayed_off_work;
struct kthread_work vsync_event_work;
struct msm_display_topology topology;
bool mode_set_complete;
@@ -742,7 +742,6 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
{
struct dpu_encoder_virt *dpu_enc;
struct msm_drm_private *priv;
- struct msm_drm_thread *disp_thread;
bool is_vid_mode = false;
if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
@@ -755,12 +754,6 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
is_vid_mode = dpu_enc->disp_info.capabilities &
MSM_DISPLAY_CAP_VID_MODE;
- if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
- DPU_ERROR("invalid crtc index\n");
- return -EINVAL;
- }
- disp_thread = &priv->disp_thread[drm_enc->crtc->index];
-
/*
* when idle_pc is not supported, process only KICKOFF, STOP and MODESET
* events and return early for other events (ie wb display).
@@ -777,8 +770,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
switch (sw_event) {
case DPU_ENC_RC_EVENT_KICKOFF:
/* cancel delayed off work, if any */
- if (kthread_cancel_delayed_work_sync(
- &dpu_enc->delayed_off_work))
+ if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
sw_event);
@@ -837,10 +829,8 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
return 0;
}
- kthread_queue_delayed_work(
- &disp_thread->worker,
- &dpu_enc->delayed_off_work,
- msecs_to_jiffies(dpu_enc->idle_timeout));
+ queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
+ msecs_to_jiffies(dpu_enc->idle_timeout));
trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
dpu_enc->idle_pc_supported, dpu_enc->rc_state,
@@ -849,8 +839,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
case DPU_ENC_RC_EVENT_PRE_STOP:
/* cancel delayed off work, if any */
- if (kthread_cancel_delayed_work_sync(
- &dpu_enc->delayed_off_work))
+ if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
sw_event);
@@ -1368,7 +1357,7 @@ static void dpu_encoder_frame_done_callback(
}
}
-static void dpu_encoder_off_work(struct kthread_work *work)
+static void dpu_encoder_off_work(struct work_struct *work)
{
struct dpu_encoder_virt *dpu_enc = container_of(work,
struct dpu_encoder_virt, delayed_off_work.work);
@@ -1756,15 +1745,14 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
}
-void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
- struct dpu_encoder_kickoff_params *params, bool async)
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, bool async)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
bool needs_hw_reset = false;
unsigned int i;
- if (!drm_enc || !params) {
+ if (!drm_enc) {
DPU_ERROR("invalid args\n");
return;
}
@@ -1778,7 +1766,7 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
phys = dpu_enc->phys_encs[i];
if (phys) {
if (phys->ops.prepare_for_kickoff)
- phys->ops.prepare_for_kickoff(phys, params);
+ phys->ops.prepare_for_kickoff(phys);
if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
needs_hw_reset = true;
}
@@ -2193,7 +2181,7 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
mutex_init(&dpu_enc->rc_lock);
- kthread_init_delayed_work(&dpu_enc->delayed_off_work,
+ INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
dpu_enc->idle_timeout = IDLE_TIMEOUT;
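Note: the encoder's idle-off timer moves from a kthread_delayed_work, which required looking up the right display thread by crtc index, to a plain delayed_work on the driver workqueue; nothing in the handler needs the display thread's context. The standard pattern, with a hypothetical container struct:

    #include <linux/workqueue.h>

    struct enc {                              /* hypothetical container */
        struct delayed_work delayed_off_work;
    };

    static void enc_off_work(struct work_struct *work)
    {
        struct enc *e = container_of(work, struct enc,
                                     delayed_off_work.work);

        /* enter idle power collapse here */
        (void)e;
    }

    static void enc_arm_idle_off(struct enc *e, struct workqueue_struct *wq,
                                 unsigned int timeout_ms)
    {
        INIT_DELAYED_WORK(&e->delayed_off_work, enc_off_work);
        queue_delayed_work(wq, &e->delayed_off_work,
                           msecs_to_jiffies(timeout_ms));
    }

    /* on kickoff/stop: cancel_delayed_work_sync(&e->delayed_off_work); */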
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 3f5dafe00580..d77f74fb26d4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -38,15 +38,6 @@ struct dpu_encoder_hw_resources {
};
/**
- * dpu_encoder_kickoff_params - info encoder requires at kickoff
- * @affected_displays: bitmask, bit set means the ROI of the commit lies within
- * the bounds of the physical display at the bit index
- */
-struct dpu_encoder_kickoff_params {
- unsigned long affected_displays;
-};
-
-/**
* dpu_encoder_get_hw_resources - Populate table of required hardware resources
* @encoder: encoder pointer
* @hw_res: resource table to populate with encoder required resources
@@ -88,11 +79,9 @@ void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
* Immediately: if no previous commit is outstanding.
* Delayed: Block until next trigger can be issued.
* @encoder: encoder pointer
- * @params: kickoff time parameters
* @async: true if this is an asynchronous commit
*/
-void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
- struct dpu_encoder_kickoff_params *params, bool async);
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder, bool async);
/**
* dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 44e6f8b68e70..db94f3d3bea3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -144,8 +144,7 @@ struct dpu_encoder_phys_ops {
int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
- void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc,
- struct dpu_encoder_kickoff_params *params);
+ void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 99ab5ca9bed3..a399e1edd313 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -594,8 +594,7 @@ static void dpu_encoder_phys_cmd_get_hw_resources(
}
static void dpu_encoder_phys_cmd_prepare_for_kickoff(
- struct dpu_encoder_phys *phys_enc,
- struct dpu_encoder_kickoff_params *params)
+ struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
@@ -693,7 +692,7 @@ static int dpu_encoder_phys_cmd_wait_for_commit_done(
/* required for both controllers */
if (!rc && cmd_enc->serialize_wait4pp)
- dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
+ dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc);
return rc;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index acdab5b0db18..3c4eb470a82c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -587,14 +587,13 @@ static int dpu_encoder_phys_vid_wait_for_vblank(
}
static void dpu_encoder_phys_vid_prepare_for_kickoff(
- struct dpu_encoder_phys *phys_enc,
- struct dpu_encoder_kickoff_params *params)
+ struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_vid *vid_enc;
struct dpu_hw_ctl *ctl;
int rc;
- if (!phys_enc || !params) {
+ if (!phys_enc) {
DPU_ERROR("invalid encoder/parameters\n");
return;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 0874f0a53bf9..f59fe1a9f4b9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -263,13 +263,13 @@ static const struct dpu_format dpu_format_map[] = {
INTERLEAVED_RGB_FMT(RGB565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
@@ -1137,36 +1137,3 @@ const struct msm_format *dpu_get_msm_format(
return &fmt->base;
return NULL;
}
-
-uint32_t dpu_populate_formats(
- const struct dpu_format_extended *format_list,
- uint32_t *pixel_formats,
- uint64_t *pixel_modifiers,
- uint32_t pixel_formats_max)
-{
- uint32_t i, fourcc_format;
-
- if (!format_list || !pixel_formats)
- return 0;
-
- for (i = 0, fourcc_format = 0;
- format_list->fourcc_format && i < pixel_formats_max;
- ++format_list) {
- /* verify if listed format is in dpu_format_map? */
-
- /* optionally return modified formats */
- if (pixel_modifiers) {
- /* assume same modifier for all fb planes */
- pixel_formats[i] = format_list->fourcc_format;
- pixel_modifiers[i++] = format_list->modifier;
- } else {
- /* assume base formats grouped together */
- if (fourcc_format != format_list->fourcc_format) {
- fourcc_format = format_list->fourcc_format;
- pixel_formats[i++] = fourcc_format;
- }
- }
- }
-
- return i;
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
index a54451d8d011..c02c81e7a667 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -41,20 +41,6 @@ const struct msm_format *dpu_get_msm_format(
const uint64_t modifiers);
/**
- * dpu_populate_formats - populate the given array with fourcc codes supported
- * @format_list: pointer to list of possible formats
- * @pixel_formats: array to populate with fourcc codes
- * @pixel_modifiers: array to populate with drm modifiers, can be NULL
- * @pixel_formats_max: length of pixel formats array
- * Return: number of elements populated
- */
-uint32_t dpu_populate_formats(
- const struct dpu_format_extended *format_list,
- uint32_t *pixel_formats,
- uint64_t *pixel_modifiers,
- uint32_t pixel_formats_max);
-
-/**
* dpu_format_check_modified_format - validate format and buffers for
* dpu non-standard, i.e. modified format
* @kms: kms driver
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 512ac0834d2b..df6852cc98b9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -151,7 +151,9 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
.id = DPU_SSPP_CSC_10BIT, \
.base = 0x1a00, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
+ .num_formats = ARRAY_SIZE(plane_formats_yuv), \
.virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
}
#define _DMA_SBLK(num, sdma_pri) \
@@ -163,7 +165,9 @@ static const struct dpu_sspp_blks_common sdm845_sspp_common = {
.src_blk = {.name = STRCAT("sspp_src_", num), \
.id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
.format_list = plane_formats, \
+ .num_formats = ARRAY_SIZE(plane_formats), \
.virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
}
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 144358a3d0fb..a55653b2e466 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -252,17 +252,6 @@ struct dpu_pp_blk {
};
/**
- * struct dpu_format_extended - define dpu specific pixel format+modifier
- * @fourcc_format: Base FOURCC pixel format code
- * @modifier: 64-bit drm format modifier, same modifier must be applied to all
- * framebuffer planes
- */
-struct dpu_format_extended {
- uint32_t fourcc_format;
- uint64_t modifier;
-};
-
-/**
* enum dpu_qos_lut_usage - define QoS LUT use cases
*/
enum dpu_qos_lut_usage {
@@ -348,7 +337,9 @@ struct dpu_sspp_blks_common {
* @pcc_blk:
* @igc_blk:
* @format_list: Pointer to list of supported formats
+ * @num_formats: Number of supported formats
* @virt_format_list: Pointer to list of supported formats for virtual planes
+ * @virt_num_formats: Number of supported formats for virtual planes
*/
struct dpu_sspp_sub_blks {
const struct dpu_sspp_blks_common *common;
@@ -366,8 +357,10 @@ struct dpu_sspp_sub_blks {
struct dpu_pp_blk pcc_blk;
struct dpu_pp_blk igc_blk;
- const struct dpu_format_extended *format_list;
- const struct dpu_format_extended *virt_format_list;
+ const u32 *format_list;
+ u32 num_formats;
+ const u32 *virt_format_list;
+ u32 virt_num_formats;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
index 3c9f028628ef..d09730985951 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
@@ -12,157 +12,81 @@
#include "dpu_hw_mdss.h"
-static const struct dpu_format_extended plane_formats[] = {
- {DRM_FORMAT_ARGB8888, 0},
- {DRM_FORMAT_ABGR8888, 0},
- {DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_BGRA8888, 0},
- {DRM_FORMAT_XRGB8888, 0},
- {DRM_FORMAT_RGBX8888, 0},
- {DRM_FORMAT_BGRX8888, 0},
- {DRM_FORMAT_XBGR8888, 0},
- {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_RGB888, 0},
- {DRM_FORMAT_BGR888, 0},
- {DRM_FORMAT_RGB565, 0},
- {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_BGR565, 0},
- {DRM_FORMAT_ARGB1555, 0},
- {DRM_FORMAT_ABGR1555, 0},
- {DRM_FORMAT_RGBA5551, 0},
- {DRM_FORMAT_BGRA5551, 0},
- {DRM_FORMAT_XRGB1555, 0},
- {DRM_FORMAT_XBGR1555, 0},
- {DRM_FORMAT_RGBX5551, 0},
- {DRM_FORMAT_BGRX5551, 0},
- {DRM_FORMAT_ARGB4444, 0},
- {DRM_FORMAT_ABGR4444, 0},
- {DRM_FORMAT_RGBA4444, 0},
- {DRM_FORMAT_BGRA4444, 0},
- {DRM_FORMAT_XRGB4444, 0},
- {DRM_FORMAT_XBGR4444, 0},
- {DRM_FORMAT_RGBX4444, 0},
- {DRM_FORMAT_BGRX4444, 0},
- {0, 0},
+static const uint32_t qcom_compressed_supported_formats[] = {
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_BGR565,
};
-static const struct dpu_format_extended plane_formats_yuv[] = {
- {DRM_FORMAT_ARGB8888, 0},
- {DRM_FORMAT_ABGR8888, 0},
- {DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_BGRX8888, 0},
- {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_BGRA8888, 0},
- {DRM_FORMAT_XRGB8888, 0},
- {DRM_FORMAT_XBGR8888, 0},
- {DRM_FORMAT_RGBX8888, 0},
- {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_RGB888, 0},
- {DRM_FORMAT_BGR888, 0},
- {DRM_FORMAT_RGB565, 0},
- {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_BGR565, 0},
- {DRM_FORMAT_ARGB1555, 0},
- {DRM_FORMAT_ABGR1555, 0},
- {DRM_FORMAT_RGBA5551, 0},
- {DRM_FORMAT_BGRA5551, 0},
- {DRM_FORMAT_XRGB1555, 0},
- {DRM_FORMAT_XBGR1555, 0},
- {DRM_FORMAT_RGBX5551, 0},
- {DRM_FORMAT_BGRX5551, 0},
- {DRM_FORMAT_ARGB4444, 0},
- {DRM_FORMAT_ABGR4444, 0},
- {DRM_FORMAT_RGBA4444, 0},
- {DRM_FORMAT_BGRA4444, 0},
- {DRM_FORMAT_XRGB4444, 0},
- {DRM_FORMAT_XBGR4444, 0},
- {DRM_FORMAT_RGBX4444, 0},
- {DRM_FORMAT_BGRX4444, 0},
-
- {DRM_FORMAT_NV12, 0},
- {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_NV21, 0},
- {DRM_FORMAT_NV16, 0},
- {DRM_FORMAT_NV61, 0},
- {DRM_FORMAT_VYUY, 0},
- {DRM_FORMAT_UYVY, 0},
- {DRM_FORMAT_YUYV, 0},
- {DRM_FORMAT_YVYU, 0},
- {DRM_FORMAT_YUV420, 0},
- {DRM_FORMAT_YVU420, 0},
- {0, 0},
-};
-
-static const struct dpu_format_extended cursor_formats[] = {
- {DRM_FORMAT_ARGB8888, 0},
- {DRM_FORMAT_ABGR8888, 0},
- {DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_BGRA8888, 0},
- {DRM_FORMAT_XRGB8888, 0},
- {DRM_FORMAT_ARGB1555, 0},
- {DRM_FORMAT_ABGR1555, 0},
- {DRM_FORMAT_RGBA5551, 0},
- {DRM_FORMAT_BGRA5551, 0},
- {DRM_FORMAT_ARGB4444, 0},
- {DRM_FORMAT_ABGR4444, 0},
- {DRM_FORMAT_RGBA4444, 0},
- {DRM_FORMAT_BGRA4444, 0},
- {0, 0},
+static const uint32_t plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_BGRX4444,
};
-static const struct dpu_format_extended wb2_formats[] = {
- {DRM_FORMAT_RGB565, 0},
- {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_RGB888, 0},
- {DRM_FORMAT_ARGB8888, 0},
- {DRM_FORMAT_RGBA8888, 0},
- {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_XRGB8888, 0},
- {DRM_FORMAT_RGBX8888, 0},
- {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_ARGB1555, 0},
- {DRM_FORMAT_RGBA5551, 0},
- {DRM_FORMAT_XRGB1555, 0},
- {DRM_FORMAT_RGBX5551, 0},
- {DRM_FORMAT_ARGB4444, 0},
- {DRM_FORMAT_RGBA4444, 0},
- {DRM_FORMAT_RGBX4444, 0},
- {DRM_FORMAT_XRGB4444, 0},
-
- {DRM_FORMAT_BGR565, 0},
- {DRM_FORMAT_BGR888, 0},
- {DRM_FORMAT_ABGR8888, 0},
- {DRM_FORMAT_BGRA8888, 0},
- {DRM_FORMAT_BGRX8888, 0},
- {DRM_FORMAT_XBGR8888, 0},
- {DRM_FORMAT_ABGR1555, 0},
- {DRM_FORMAT_BGRA5551, 0},
- {DRM_FORMAT_XBGR1555, 0},
- {DRM_FORMAT_BGRX5551, 0},
- {DRM_FORMAT_ABGR4444, 0},
- {DRM_FORMAT_BGRA4444, 0},
- {DRM_FORMAT_BGRX4444, 0},
- {DRM_FORMAT_XBGR4444, 0},
-
- {DRM_FORMAT_YUV420, 0},
- {DRM_FORMAT_NV12, 0},
- {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_NV16, 0},
- {DRM_FORMAT_YUYV, 0},
-
- {0, 0},
-};
+static const uint32_t plane_formats_yuv[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_BGRX4444,
-static const struct dpu_format_extended rgb_10bit_formats[] = {
- {DRM_FORMAT_BGRA1010102, 0},
- {DRM_FORMAT_BGRX1010102, 0},
- {DRM_FORMAT_RGBA1010102, 0},
- {DRM_FORMAT_RGBX1010102, 0},
- {DRM_FORMAT_ABGR2101010, 0},
- {DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_XBGR2101010, 0},
- {DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
- {DRM_FORMAT_ARGB2101010, 0},
- {DRM_FORMAT_XRGB2101010, 0},
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
};
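Note: with struct dpu_format_extended gone, a catalog format list is just a u32 fourcc array plus an explicit count, and pairing the two through ARRAY_SIZE inside the sub-block macros keeps the count from drifting out of sync with the table. The idiom, sketched with hypothetical names:

    #include <linux/kernel.h>
    #include <drm/drm_fourcc.h>

    static const u32 sketch_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
    };

    struct sketch_sblk {
        const u32 *format_list;
        u32 num_formats;
    };

    #define SKETCH_SBLK \
        { \
            .format_list = sketch_formats, \
            .num_formats = ARRAY_SIZE(sketch_formats), \
        }

    static const struct sketch_sblk sblk0 = SKETCH_SBLK;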
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index c0b7f0049365..8a28a03ac6a9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -170,10 +170,6 @@
/**
* AD4 interrupt status bit definitions
*/
-#define DPU_INTR_BRIGHTPR_UPDATED BIT(4)
-#define DPU_INTR_DARKENH_UPDATED BIT(3)
-#define DPU_INTR_STREN_OUTROI_UPDATED BIT(2)
-#define DPU_INTR_STREN_INROI_UPDATED BIT(1)
#define DPU_INTR_BACKLIGHT_UPDATED BIT(0)
/**
* struct dpu_intr_reg - array of DPU register sets
@@ -782,18 +778,6 @@ static int dpu_hw_intr_irqidx_lookup(enum dpu_intr_type intr_type,
return -EINVAL;
}
-static void dpu_hw_intr_set_mask(struct dpu_hw_intr *intr, uint32_t reg_off,
- uint32_t mask)
-{
- if (!intr)
- return;
-
- DPU_REG_WRITE(&intr->hw, reg_off, mask);
-
- /* ensure register writes go through */
- wmb();
-}
-
static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
void (*cbfunc)(void *, int),
void *arg)
@@ -1004,18 +988,6 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
return 0;
}
-static int dpu_hw_intr_get_valid_interrupts(struct dpu_hw_intr *intr,
- uint32_t *mask)
-{
- if (!intr || !mask)
- return -EINVAL;
-
- *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
- | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
-
- return 0;
-}
-
static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
{
int i;
@@ -1065,19 +1037,6 @@ static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
wmb();
}
-static void dpu_hw_intr_clear_interrupt_status(struct dpu_hw_intr *intr,
- int irq_idx)
-{
- unsigned long irq_flags;
-
- if (!intr)
- return;
-
- spin_lock_irqsave(&intr->irq_lock, irq_flags);
- dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
- spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
-}
-
static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
int irq_idx, bool clear)
{
@@ -1113,16 +1072,13 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
{
- ops->set_mask = dpu_hw_intr_set_mask;
ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup;
ops->enable_irq = dpu_hw_intr_enable_irq;
ops->disable_irq = dpu_hw_intr_disable_irq;
ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
- ops->get_valid_interrupts = dpu_hw_intr_get_valid_interrupts;
ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses;
- ops->clear_interrupt_status = dpu_hw_intr_clear_interrupt_status;
ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock;
ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 61e4cba36562..4d7a1c727ce2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -20,13 +20,6 @@
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
-#define IRQ_SOURCE_MDP BIT(0)
-#define IRQ_SOURCE_DSI0 BIT(4)
-#define IRQ_SOURCE_DSI1 BIT(5)
-#define IRQ_SOURCE_HDMI BIT(8)
-#define IRQ_SOURCE_EDP BIT(12)
-#define IRQ_SOURCE_MHL BIT(16)
-
/**
* dpu_intr_type - HW Interrupt Type
* @DPU_IRQ_TYPE_WB_ROT_COMP: WB rotator done
@@ -96,18 +89,6 @@ struct dpu_hw_intr;
*/
struct dpu_hw_intr_ops {
/**
- * set_mask - Programs the given interrupt register with the
- * given interrupt mask. Register value will get overwritten.
- * @intr: HW interrupt handle
- * @reg_off: MDSS HW register offset
- * @irqmask: IRQ mask value
- */
- void (*set_mask)(
- struct dpu_hw_intr *intr,
- uint32_t reg,
- uint32_t irqmask);
-
- /**
* irq_idx_lookup - Lookup IRQ index on the HW interrupt type
* Used for all irq related ops
* @intr_type: Interrupt type defined in dpu_intr_type
@@ -177,16 +158,6 @@ struct dpu_hw_intr_ops {
struct dpu_hw_intr *intr);
/**
- * clear_interrupt_status - Clears HW interrupt status based on given
- * lookup IRQ index.
- * @intr: HW interrupt handle
- * @irq_idx: Lookup irq index return from irq_idx_lookup
- */
- void (*clear_interrupt_status)(
- struct dpu_hw_intr *intr,
- int irq_idx);
-
- /**
* clear_intr_status_nolock() - clears the HW interrupts without lock
* @intr: HW interrupt handle
* @irq_idx: Lookup irq index return from irq_idx_lookup
@@ -206,21 +177,6 @@ struct dpu_hw_intr_ops {
struct dpu_hw_intr *intr,
int irq_idx,
bool clear);
-
- /**
- * get_valid_interrupts - Gets a mask of all valid interrupt sources
- * within DPU. These are actually status bits
- * within interrupt registers that specify the
- * source of the interrupt in IRQs. For example,
- * valid interrupt sources can be MDP, DSI,
- * HDMI etc.
- * @intr: HW interrupt handle
- * @mask: Returning the interrupt source MASK
- * @return: 0 for success, otherwise failure
- */
- int (*get_valid_interrupts)(
- struct dpu_hw_intr *intr,
- uint32_t *mask);
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 68c54d2c9677..1ab8d4a889f7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -258,12 +258,6 @@ enum dpu_vbif {
VBIF_NRT = VBIF_1
};
-enum dpu_iommu_domain {
- DPU_IOMMU_DOMAIN_UNSECURE,
- DPU_IOMMU_DOMAIN_SECURE,
- DPU_IOMMU_DOMAIN_MAX
-};
-
/**
* DPU HW,Component order color map
*/
@@ -358,7 +352,6 @@ enum dpu_3d_blend_mode {
* @alpha_enable: whether the format has an alpha channel
* @num_planes: number of planes (including meta data planes)
* @fetch_mode: linear, tiled, or ubwc hw fetch behavior
- * @is_yuv: is format a yuv variant
* @flag: usage bit flags
* @tile_width: format tile width
* @tile_height: format tile height
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 321fc64ddd0e..efe70c508ee0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -18,7 +18,6 @@
#include "dpu_hw_mdss.h"
#define REG_MASK(n) ((BIT(n)) - 1)
-struct dpu_format_extended;
/*
* This is the common struct maintained by each sub block
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 4d67b3c96702..885bf88afa3e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -405,35 +405,38 @@ static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
}
}
-static void _dpu_kms_initialize_dsi(struct drm_device *dev,
+static int _dpu_kms_initialize_dsi(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
{
struct drm_encoder *encoder = NULL;
- int i, rc;
+ int i, rc = 0;
+
+ if (!(priv->dsi[0] || priv->dsi[1]))
+ return rc;
/*TODO: Support two independent DSI connectors */
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
- if (IS_ERR_OR_NULL(encoder)) {
+ if (IS_ERR(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n");
- return;
+ return PTR_ERR(encoder);
}
priv->encoders[priv->num_encoders++] = encoder;
for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
- if (!priv->dsi[i]) {
- DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i);
- return;
- }
+ if (!priv->dsi[i])
+ continue;
rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
i, rc);
- continue;
+ break;
}
}
+
+ return rc;
}
/**
@@ -444,16 +447,16 @@ static void _dpu_kms_initialize_dsi(struct drm_device *dev,
* @dpu_kms: Pointer to dpu kms structure
* Returns: Zero on success
*/
-static void _dpu_kms_setup_displays(struct drm_device *dev,
+static int _dpu_kms_setup_displays(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
{
- _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
-
/**
* Extend this function to initialize other
* types of displays
*/
+
+ return _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
}
static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
@@ -516,7 +519,9 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
* Create encoder and query display drivers to create
* bridges and connectors
*/
- _dpu_kms_setup_displays(dev, priv, dpu_kms);
+ ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
+ if (ret)
+ goto fail;
max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
@@ -627,6 +632,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
dpu_kms->vbif[VBIF_RT] = NULL;
+ if (dpu_kms->hw_mdp)
+ dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
+ dpu_kms->hw_mdp = NULL;
+
if (dpu_kms->mmio)
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
dpu_kms->mmio = NULL;
@@ -877,8 +886,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto power_error;
}
- rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
- dpu_kms->dev);
+ rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
if (rc) {
DPU_ERROR("rm init failed: %d\n", rc);
goto power_error;
@@ -886,11 +894,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms->rm_init = true;
- dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm);
- if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) {
+ dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio,
+ dpu_kms->catalog);
+ if (IS_ERR(dpu_kms->hw_mdp)) {
rc = PTR_ERR(dpu_kms->hw_mdp);
- if (!dpu_kms->hw_mdp)
- rc = -EINVAL;
DPU_ERROR("failed to get hw_mdp: %d\n", rc);
dpu_kms->hw_mdp = NULL;
goto power_error;
@@ -926,16 +933,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto hw_intr_init_err;
}
- /*
- * _dpu_kms_drm_obj_init should create the DRM related objects
- * i.e. CRTCs, planes, encoders, connectors and so forth
- */
- rc = _dpu_kms_drm_obj_init(dpu_kms);
- if (rc) {
- DPU_ERROR("modeset init failed: %d\n", rc);
- goto drm_obj_init_err;
- }
-
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
@@ -952,6 +949,16 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
*/
dev->mode_config.allow_fb_modifiers = true;
+ /*
+ * _dpu_kms_drm_obj_init should create the DRM related objects
+ * i.e. CRTCs, planes, encoders, connectors and so forth
+ */
+ rc = _dpu_kms_drm_obj_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("modeset init failed: %d\n", rc);
+ goto drm_obj_init_err;
+ }
+
dpu_vbif_init_memtypes(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index cb307a2abf06..7316b4ab1b85 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -23,11 +23,14 @@ struct dpu_mdss {
struct dpu_irq_controller irq_controller;
};
-static irqreturn_t dpu_mdss_irq(int irq, void *arg)
+static void dpu_mdss_irq(struct irq_desc *desc)
{
- struct dpu_mdss *dpu_mdss = arg;
+ struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
u32 interrupts;
+ chained_irq_enter(chip, desc);
+
interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
while (interrupts) {
@@ -39,20 +42,20 @@ static irqreturn_t dpu_mdss_irq(int irq, void *arg)
hwirq);
if (mapping == 0) {
DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
- return IRQ_NONE;
+ break;
}
rc = generic_handle_irq(mapping);
if (rc < 0) {
DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
hwirq, mapping, rc);
- return IRQ_NONE;
+ break;
}
interrupts &= ~(1 << hwirq);
}
- return IRQ_HANDLED;
+ chained_irq_exit(chip, desc);
}
static void dpu_mdss_irq_mask(struct irq_data *irqd)
@@ -83,16 +86,16 @@ static struct irq_chip dpu_mdss_irq_chip = {
.irq_unmask = dpu_mdss_irq_unmask,
};
+static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key;
+
static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
unsigned int irq, irq_hw_number_t hwirq)
{
struct dpu_mdss *dpu_mdss = domain->host_data;
- int ret;
+ irq_set_lockdep_class(irq, &dpu_mdss_lock_key, &dpu_mdss_request_key);
irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
- ret = irq_set_chip_data(irq, dpu_mdss);
-
- return ret;
+ return irq_set_chip_data(irq, dpu_mdss);
}
static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
@@ -159,11 +162,13 @@ static void dpu_mdss_destroy(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
+ int irq;
pm_runtime_suspend(dev->dev);
pm_runtime_disable(dev->dev);
_dpu_mdss_irq_domain_fini(dpu_mdss);
- free_irq(platform_get_irq(pdev, 0), dpu_mdss);
+ irq = platform_get_irq(pdev, 0);
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
@@ -187,6 +192,7 @@ int dpu_mdss_init(struct drm_device *dev)
struct dpu_mdss *dpu_mdss;
struct dss_module_power *mp;
int ret = 0;
+ int irq;
dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
if (!dpu_mdss)
@@ -219,12 +225,12 @@ int dpu_mdss_init(struct drm_device *dev)
if (ret)
goto irq_domain_error;
- ret = request_irq(platform_get_irq(pdev, 0),
- dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
- if (ret) {
- DPU_ERROR("failed to init irq: %d\n", ret);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
goto irq_error;
- }
+
+ irq_set_chained_handler_and_data(irq, dpu_mdss_irq,
+ dpu_mdss);
pm_runtime_enable(dev->dev);
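Note: dpu_mdss stops being a request_irq() consumer and becomes a chained flow handler: the MDSS top level demuxes HW_INTR_STATUS into its own irq_domain, and chained_irq_enter()/exit() bracket the loop so the parent chip is acked and eoi'd correctly. This also explains why teardown now detaches the handler instead of calling free_irq(). The canonical demux shape, with hypothetical register and container names:

    #include <linux/io.h>
    #include <linux/irq.h>
    #include <linux/irqdomain.h>
    #include <linux/bitops.h>
    #include <linux/irqchip/chained_irq.h>

    struct demux_ctrl {                   /* hypothetical container */
        void __iomem *mmio;
        struct irq_domain *domain;
    };

    #define SKETCH_INTR_STATUS 0x10       /* hypothetical register offset */

    static void demux_irq(struct irq_desc *desc)
    {
        struct demux_ctrl *ctrl = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned long status;
        unsigned int hwirq;

        chained_irq_enter(chip, desc);    /* ack/mask on the parent chip */
        status = readl_relaxed(ctrl->mmio + SKETCH_INTR_STATUS);
        for_each_set_bit(hwirq, &status, 32) {
            unsigned int virq = irq_find_mapping(ctrl->domain, hwirq);

            if (virq)
                generic_handle_irq(virq);
        }
        chained_irq_exit(chip, desc);     /* eoi/unmask on the parent chip */
    }

    /* setup:    irq_set_chained_handler_and_data(irq, demux_irq, ctrl); */
    /* teardown: irq_set_chained_handler_and_data(irq, NULL, NULL);      */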
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 6aefcd6db46b..b01183b309b9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -95,8 +95,6 @@ struct dpu_plane {
enum dpu_sspp pipe;
uint32_t features; /* capabilities from catalog */
- uint32_t nformats;
- uint32_t formats[64];
struct dpu_hw_pipe *pipe_hw;
struct dpu_hw_pipe_cfg pipe_cfg;
@@ -121,6 +119,12 @@ struct dpu_plane {
bool debugfs_default_scale;
};
+static const uint64_t supported_format_modifiers[] = {
+ DRM_FORMAT_MOD_QCOM_COMPRESSED,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
#define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
@@ -1410,6 +1414,23 @@ static void dpu_plane_early_unregister(struct drm_plane *plane)
debugfs_remove_recursive(pdpu->debugfs_root);
}
+static bool dpu_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format, uint64_t modifier)
+{
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ if (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qcom_compressed_supported_formats); i++) {
+ if (format == qcom_compressed_supported_formats[i])
+ return true;
+ }
+ }
+
+ return false;
+}
+
static const struct drm_plane_funcs dpu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -1419,6 +1440,7 @@ static const struct drm_plane_funcs dpu_plane_funcs = {
.atomic_destroy_state = dpu_plane_destroy_state,
.late_register = dpu_plane_late_register,
.early_unregister = dpu_plane_early_unregister,
+ .format_mod_supported = dpu_plane_format_mod_supported,
};
static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
@@ -1444,11 +1466,12 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
unsigned long possible_crtcs, u32 master_plane_id)
{
struct drm_plane *plane = NULL, *master_plane = NULL;
- const struct dpu_format_extended *format_list;
+ const uint32_t *format_list;
struct dpu_plane *pdpu;
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *kms = to_dpu_kms(priv->kms);
int zpos_max = DPU_ZPOS_MAX;
+ uint32_t num_formats;
int ret = -EINVAL;
/* create and zero local structure */
@@ -1491,24 +1514,18 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
goto clean_sspp;
}
- if (!master_plane_id)
- format_list = pdpu->pipe_sblk->format_list;
- else
+ if (pdpu->is_virtual) {
format_list = pdpu->pipe_sblk->virt_format_list;
-
- pdpu->nformats = dpu_populate_formats(format_list,
- pdpu->formats,
- 0,
- ARRAY_SIZE(pdpu->formats));
-
- if (!pdpu->nformats) {
- DPU_ERROR("[%u]no valid formats for plane\n", pipe);
- goto clean_sspp;
+ num_formats = pdpu->pipe_sblk->virt_num_formats;
+ } else {
+ format_list = pdpu->pipe_sblk->format_list;
+ num_formats = pdpu->pipe_sblk->num_formats;
}
ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
- pdpu->formats, pdpu->nformats,
- NULL, type, NULL);
+ format_list, num_formats,
+ supported_format_modifiers, type, NULL);
if (ret)
goto clean_sspp;
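Note: dpu_plane now feeds the catalog's u32 format array straight into drm_universal_plane_init() together with an explicit modifier list, which must end in DRM_FORMAT_MOD_INVALID, and a .format_mod_supported hook, letting the core build the IN_FORMATS blob and validate format/modifier pairs. A trimmed registration sketch:

    #include <drm/drm_atomic_helper.h>
    #include <drm/drm_fourcc.h>
    #include <drm/drm_plane.h>

    static const u32 sketch_formats[] = { DRM_FORMAT_XRGB8888 };
    static const u64 sketch_modifiers[] = {
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID,       /* mandatory terminator */
    };

    static bool sketch_mod_supported(struct drm_plane *plane,
                                     u32 format, u64 modifier)
    {
        return modifier == DRM_FORMAT_MOD_LINEAR;
    }

    static const struct drm_plane_funcs sketch_funcs = {
        .update_plane  = drm_atomic_helper_update_plane,
        .disable_plane = drm_atomic_helper_disable_plane,
        .format_mod_supported = sketch_mod_supported,
    };

    static int sketch_plane_init(struct drm_device *dev,
                                 struct drm_plane *plane)
    {
        return drm_universal_plane_init(dev, plane, 0xff, &sketch_funcs,
                                        sketch_formats,
                                        ARRAY_SIZE(sketch_formats),
                                        sketch_modifiers,
                                        DRM_PLANE_TYPE_PRIMARY, NULL);
    }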
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index 7fed0b627708..0e6063acd041 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -28,23 +28,18 @@
/**
* struct dpu_plane_state: Define dpu extension of drm plane state object
* @base: base drm plane state object
- * @property_state: Local storage for msm_prop properties
- * @property_values: cached plane property values
* @aspace: pointer to address space for input/output buffers
- * @input_fence: dereferenced input fence pointer
* @stage: assigned by crtc blender
* @multirect_index: index of the rectangle of SSPP
* @multirect_mode: parallel or time multiplex multirect mode
* @pending: whether the current update is still pending
* @scaler3_cfg: configuration data for scaler3
* @pixel_ext: configuration data for pixel extensions
- * @scaler_check_state: indicates status of user provided pixel extension data
* @cdp_cfg: CDP configuration
*/
struct dpu_plane_state {
struct drm_plane_state base;
struct msm_gem_address_space *aspace;
- void *input_fence;
enum dpu_stage stage;
uint32_t multirect_index;
uint32_t multirect_mode;
@@ -107,12 +102,6 @@ void dpu_plane_restore(struct drm_plane *plane);
void dpu_plane_flush(struct drm_plane *plane);
/**
- * dpu_plane_kickoff - final plane operations before commit kickoff
- * @plane: Pointer to drm plane structure
- */
-void dpu_plane_kickoff(struct drm_plane *plane);
-
-/**
* dpu_plane_set_error: enable/disable error condition
* @plane: pointer to drm_plane structure
*/
@@ -147,14 +136,6 @@ int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane);
void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
/**
- * dpu_plane_wait_input_fence - wait for input fence object
- * @plane: Pointer to DRM plane object
- * @wait_ms: Wait timeout value
- * Returns: Zero on success
- */
-int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
-
-/**
* dpu_plane_color_fill - enables color fill on plane
* @plane: Pointer to DRM plane object
* @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
@@ -164,12 +145,4 @@ int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
int dpu_plane_color_fill(struct drm_plane *plane,
uint32_t color, uint32_t alpha);
-/**
- * dpu_plane_set_revalidate - sets revalidate flag which forces a full
- * validation of the plane properties in the next atomic check
- * @plane: Pointer to DRM plane object
- * @enable: Boolean to set/unset the flag
- */
-void dpu_plane_set_revalidate(struct drm_plane *plane, bool enable);
-
#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index bdb117709674..037d9f4187f9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -21,8 +21,8 @@
#include "dpu_encoder.h"
#include "dpu_trace.h"
-#define RESERVED_BY_OTHER(h, r) \
- ((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
+#define RESERVED_BY_OTHER(h, r) \
+ ((h)->enc_id && (h)->enc_id != r)
/**
* struct dpu_rm_requirements - Reservation requirements parameter bundle
@@ -34,90 +34,21 @@ struct dpu_rm_requirements {
struct dpu_encoder_hw_resources hw_res;
};
-/**
- * struct dpu_rm_rsvp - Use Case Reservation tagging structure
- * Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
- * By using as a tag, rather than lists of pointers to HW blocks used
- * we can avoid some list management since we don't know how many blocks
- * of each type a given use case may require.
- * @list: List head for list of all reservations
- * @seq: Global RSVP sequence number for debugging, especially for
- * differentiating differenct allocations for same encoder.
- * @enc_id: Reservations are tracked by Encoder DRM object ID.
- * CRTCs may be connected to multiple Encoders.
- * An encoder or connector id identifies the display path.
- */
-struct dpu_rm_rsvp {
- struct list_head list;
- uint32_t seq;
- uint32_t enc_id;
-};
/**
* struct dpu_rm_hw_blk - hardware block tracking list member
* @list: List head for list of all hardware blocks tracking items
- * @rsvp: Pointer to use case reservation if reserved by a client
- * @rsvp_nxt: Temporary pointer used during reservation to the incoming
- * request. Will be swapped into rsvp if proposal is accepted
- * @type: Type of hardware block this structure tracks
* @id: Hardware ID number, within it's own space, ie. LM_X
- * @catalog: Pointer to the hardware catalog entry for this block
 * @enc_id: Encoder id to which this blk is bound
* @hw: Pointer to the hardware register access object for this block
*/
struct dpu_rm_hw_blk {
struct list_head list;
- struct dpu_rm_rsvp *rsvp;
- struct dpu_rm_rsvp *rsvp_nxt;
- enum dpu_hw_blk_type type;
uint32_t id;
+ uint32_t enc_id;
struct dpu_hw_blk *hw;
};
-/**
- * dpu_rm_dbg_rsvp_stage - enum of steps in making reservation for event logging
- */
-enum dpu_rm_dbg_rsvp_stage {
- DPU_RM_STAGE_BEGIN,
- DPU_RM_STAGE_AFTER_CLEAR,
- DPU_RM_STAGE_AFTER_RSVPNEXT,
- DPU_RM_STAGE_FINAL
-};
-
-static void _dpu_rm_print_rsvps(
- struct dpu_rm *rm,
- enum dpu_rm_dbg_rsvp_stage stage)
-{
- struct dpu_rm_rsvp *rsvp;
- struct dpu_rm_hw_blk *blk;
- enum dpu_hw_blk_type type;
-
- DPU_DEBUG("%d\n", stage);
-
- list_for_each_entry(rsvp, &rm->rsvps, list) {
- DRM_DEBUG_KMS("%d rsvp[s%ue%u]\n", stage, rsvp->seq,
- rsvp->enc_id);
- }
-
- for (type = 0; type < DPU_HW_BLK_MAX; type++) {
- list_for_each_entry(blk, &rm->hw_blks[type], list) {
- if (!blk->rsvp && !blk->rsvp_nxt)
- continue;
-
- DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
- (blk->rsvp) ? blk->rsvp->seq : 0,
- (blk->rsvp) ? blk->rsvp->enc_id : 0,
- (blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
- (blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
- blk->type, blk->id);
- }
- }
-}
-
-struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
-{
- return rm->hw_mdp;
-}
-
void dpu_rm_init_hw_iter(
struct dpu_rm_hw_iter *iter,
uint32_t enc_id,
@@ -148,15 +79,7 @@ static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
i->blk = list_prepare_entry(i->blk, blk_list, list);
list_for_each_entry_continue(i->blk, blk_list, list) {
- struct dpu_rm_rsvp *rsvp = i->blk->rsvp;
-
- if (i->blk->type != i->type) {
- DPU_ERROR("found incorrect block type %d on %d list\n",
- i->blk->type, i->type);
- return false;
- }
-
- if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
+ if (i->enc_id == i->blk->enc_id) {
i->hw = i->blk->hw;
DPU_DEBUG("found type %d id %d for enc %d\n",
i->type, i->blk->id, i->enc_id);
@@ -208,34 +131,18 @@ static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
int dpu_rm_destroy(struct dpu_rm *rm)
{
-
- struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
enum dpu_hw_blk_type type;
- if (!rm) {
- DPU_ERROR("invalid rm\n");
- return -EINVAL;
- }
-
- list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
- list_del(&rsvp_cur->list);
- kfree(rsvp_cur);
- }
-
-
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
list) {
list_del(&hw_cur->list);
- _dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw);
+ _dpu_rm_hw_destroy(type, hw_cur->hw);
kfree(hw_cur);
}
}
- dpu_hw_mdp_destroy(rm->hw_mdp);
- rm->hw_mdp = NULL;
-
mutex_destroy(&rm->rm_lock);
return 0;
@@ -250,11 +157,8 @@ static int _dpu_rm_hw_blk_create(
void *hw_catalog_info)
{
struct dpu_rm_hw_blk *blk;
- struct dpu_hw_mdp *hw_mdp;
void *hw;
- hw_mdp = rm->hw_mdp;
-
switch (type) {
case DPU_HW_BLK_LM:
hw = dpu_hw_lm_init(id, mmio, cat);
@@ -290,9 +194,9 @@ static int _dpu_rm_hw_blk_create(
return -ENOMEM;
}
- blk->type = type;
blk->id = id;
blk->hw = hw;
+ blk->enc_id = 0;
list_add_tail(&blk->list, &rm->hw_blks[type]);
return 0;
@@ -300,13 +204,12 @@ static int _dpu_rm_hw_blk_create(
int dpu_rm_init(struct dpu_rm *rm,
struct dpu_mdss_cfg *cat,
- void __iomem *mmio,
- struct drm_device *dev)
+ void __iomem *mmio)
{
int rc, i;
enum dpu_hw_blk_type type;
- if (!rm || !cat || !mmio || !dev) {
+ if (!rm || !cat || !mmio) {
DPU_ERROR("invalid kms\n");
return -EINVAL;
}
@@ -316,21 +219,9 @@ int dpu_rm_init(struct dpu_rm *rm,
mutex_init(&rm->rm_lock);
- INIT_LIST_HEAD(&rm->rsvps);
for (type = 0; type < DPU_HW_BLK_MAX; type++)
INIT_LIST_HEAD(&rm->hw_blks[type]);
- rm->dev = dev;
-
- /* Some of the sub-blocks require an mdptop to be created */
- rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat);
- if (IS_ERR_OR_NULL(rm->hw_mdp)) {
- rc = PTR_ERR(rm->hw_mdp);
- rm->hw_mdp = NULL;
- DPU_ERROR("failed: mdp hw not available\n");
- goto fail;
- }
-
/* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) {
struct dpu_lm_cfg *lm = &cat->mixer[i];
@@ -410,7 +301,7 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
* proposed use case requirements, incl. hardwired dependent blocks like
* pingpong
* @rm: dpu resource manager handle
- * @rsvp: reservation currently being created
 * @enc_id: encoder id requesting the allocation
* @reqs: proposed use case requirements
* @lm: proposed layer mixer, function checks if lm, and all other hardwired
* blocks connected to the lm (pp) is available and appropriate
@@ -422,7 +313,7 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
*/
static bool _dpu_rm_check_lm_and_get_connected_blks(
struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
+ uint32_t enc_id,
struct dpu_rm_requirements *reqs,
struct dpu_rm_hw_blk *lm,
struct dpu_rm_hw_blk **pp,
@@ -449,7 +340,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(
}
/* Already reserved? */
- if (RESERVED_BY_OTHER(lm, rsvp)) {
+ if (RESERVED_BY_OTHER(lm, enc_id)) {
DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
return false;
}
@@ -467,7 +358,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(
return false;
}
- if (RESERVED_BY_OTHER(*pp, rsvp)) {
+ if (RESERVED_BY_OTHER(*pp, enc_id)) {
DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
(*pp)->id);
return false;
@@ -476,10 +367,8 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(
return true;
}
-static int _dpu_rm_reserve_lms(
- struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
- struct dpu_rm_requirements *reqs)
+static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id,
+ struct dpu_rm_requirements *reqs)
{
struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
@@ -504,7 +393,7 @@ static int _dpu_rm_reserve_lms(
lm[lm_count] = iter_i.blk;
if (!_dpu_rm_check_lm_and_get_connected_blks(
- rm, rsvp, reqs, lm[lm_count],
+ rm, enc_id, reqs, lm[lm_count],
&pp[lm_count], NULL))
continue;
@@ -519,7 +408,7 @@ static int _dpu_rm_reserve_lms(
continue;
if (!_dpu_rm_check_lm_and_get_connected_blks(
- rm, rsvp, reqs, iter_j.blk,
+ rm, enc_id, reqs, iter_j.blk,
&pp[lm_count], iter_i.blk))
continue;
@@ -537,11 +426,10 @@ static int _dpu_rm_reserve_lms(
if (!lm[i])
break;
- lm[i]->rsvp_nxt = rsvp;
- pp[i]->rsvp_nxt = rsvp;
+ lm[i]->enc_id = enc_id;
+ pp[i]->enc_id = enc_id;
- trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id,
- pp[i]->id);
+ trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id);
}
return rc;
@@ -549,7 +437,7 @@ static int _dpu_rm_reserve_lms(
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
+ uint32_t enc_id,
const struct msm_display_topology *top)
{
struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
@@ -570,7 +458,7 @@ static int _dpu_rm_reserve_ctls(
unsigned long features = ctl->caps->features;
bool has_split_display;
- if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ if (RESERVED_BY_OTHER(iter.blk, enc_id))
continue;
has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
@@ -591,9 +479,8 @@ static int _dpu_rm_reserve_ctls(
return -ENAVAIL;
for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
- ctls[i]->rsvp_nxt = rsvp;
- trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
- rsvp->enc_id);
+ ctls[i]->enc_id = enc_id;
+ trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id);
}
return 0;
@@ -601,7 +488,7 @@ static int _dpu_rm_reserve_ctls(
static int _dpu_rm_reserve_intf(
struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
+ uint32_t enc_id,
uint32_t id,
enum dpu_hw_blk_type type)
{
@@ -614,14 +501,13 @@ static int _dpu_rm_reserve_intf(
if (iter.blk->id != id)
continue;
- if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
+ if (RESERVED_BY_OTHER(iter.blk, enc_id)) {
DPU_ERROR("type %d id %d already reserved\n", type, id);
return -ENAVAIL;
}
- iter.blk->rsvp_nxt = rsvp;
- trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type,
- rsvp->enc_id);
+ iter.blk->enc_id = enc_id;
+ trace_dpu_rm_reserve_intf(iter.blk->id, enc_id);
break;
}
@@ -636,7 +522,7 @@ static int _dpu_rm_reserve_intf(
static int _dpu_rm_reserve_intf_related_hw(
struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
+ uint32_t enc_id,
struct dpu_encoder_hw_resources *hw_res)
{
int i, ret = 0;
@@ -646,7 +532,7 @@ static int _dpu_rm_reserve_intf_related_hw(
if (hw_res->intfs[i] == INTF_MODE_NONE)
continue;
id = i + INTF_0;
- ret = _dpu_rm_reserve_intf(rm, rsvp, id,
+ ret = _dpu_rm_reserve_intf(rm, enc_id, id,
DPU_HW_BLK_INTF);
if (ret)
return ret;
@@ -655,33 +541,27 @@ static int _dpu_rm_reserve_intf_related_hw(
return ret;
}
-static int _dpu_rm_make_next_rsvp(
+static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct dpu_rm_rsvp *rsvp,
struct dpu_rm_requirements *reqs)
{
int ret;
- /* Create reservation info, tag reserved blocks with it as we go */
- rsvp->seq = ++rm->rsvp_next_seq;
- rsvp->enc_id = enc->base.id;
- list_add_tail(&rsvp->list, &rm->rsvps);
-
- ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
+ ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
- ret = _dpu_rm_reserve_ctls(rm, rsvp, &reqs->topology);
+ ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+ ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res);
if (ret)
return ret;
@@ -706,108 +586,31 @@ static int _dpu_rm_populate_requirements(
return 0;
}
-static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
- struct dpu_rm *rm,
- struct drm_encoder *enc)
+static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id)
{
- struct dpu_rm_rsvp *i;
-
- if (!rm || !enc) {
- DPU_ERROR("invalid params\n");
- return NULL;
- }
-
- if (list_empty(&rm->rsvps))
- return NULL;
-
- list_for_each_entry(i, &rm->rsvps, list)
- if (i->enc_id == enc->base.id)
- return i;
-
- return NULL;
-}
-
-/**
- * _dpu_rm_release_rsvp - release resources and release a reservation
- * @rm: KMS handle
- * @rsvp: RSVP pointer to release and release resources for
- */
-static void _dpu_rm_release_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
-{
- struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
struct dpu_rm_hw_blk *blk;
enum dpu_hw_blk_type type;
- if (!rsvp)
- return;
-
- DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
-
- list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
- if (rsvp == rsvp_c) {
- list_del(&rsvp_c->list);
- break;
- }
- }
-
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
list_for_each_entry(blk, &rm->hw_blks[type], list) {
- if (blk->rsvp == rsvp) {
- blk->rsvp = NULL;
- DPU_DEBUG("rel rsvp %d enc %d %d %d\n",
- rsvp->seq, rsvp->enc_id,
- blk->type, blk->id);
- }
- if (blk->rsvp_nxt == rsvp) {
- blk->rsvp_nxt = NULL;
- DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
- rsvp->seq, rsvp->enc_id,
- blk->type, blk->id);
+ if (blk->enc_id == enc_id) {
+ blk->enc_id = 0;
+ DPU_DEBUG("rel enc %d %d %d\n", enc_id,
+ type, blk->id);
}
}
}
-
- kfree(rsvp);
}
void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
{
- struct dpu_rm_rsvp *rsvp;
-
- if (!rm || !enc) {
- DPU_ERROR("invalid params\n");
- return;
- }
-
mutex_lock(&rm->rm_lock);
- rsvp = _dpu_rm_get_rsvp(rm, enc);
- if (!rsvp) {
- DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
- goto end;
- }
+ _dpu_rm_release_reservation(rm, enc->base.id);
- _dpu_rm_release_rsvp(rm, rsvp);
-end:
mutex_unlock(&rm->rm_lock);
}
-static void _dpu_rm_commit_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
-{
- struct dpu_rm_hw_blk *blk;
- enum dpu_hw_blk_type type;
-
- /* Swap next rsvp to be the active */
- for (type = 0; type < DPU_HW_BLK_MAX; type++) {
- list_for_each_entry(blk, &rm->hw_blks[type], list) {
- if (blk->rsvp_nxt) {
- blk->rsvp = blk->rsvp_nxt;
- blk->rsvp_nxt = NULL;
- }
- }
- }
-}
-
int dpu_rm_reserve(
struct dpu_rm *rm,
struct drm_encoder *enc,
@@ -815,7 +618,6 @@ int dpu_rm_reserve(
struct msm_display_topology topology,
bool test_only)
{
- struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
struct dpu_rm_requirements reqs;
int ret;
@@ -828,8 +630,6 @@ int dpu_rm_reserve(
mutex_lock(&rm->rm_lock);
- _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);
-
ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
topology);
if (ret) {
@@ -837,50 +637,17 @@ int dpu_rm_reserve(
goto end;
}
- /*
- * We only support one active reservation per-hw-block. But to implement
- * transactional semantics for test-only, and for allowing failure while
- * modifying your existing reservation, over the course of this
- * function we can have two reservations:
- * Current: Existing reservation
- * Next: Proposed reservation. The proposed reservation may fail, or may
- * be discarded if in test-only mode.
- * If reservation is successful, and we're not in test-only, then we
- * replace the current with the next.
- */
- rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
- if (!rsvp_nxt) {
- ret = -ENOMEM;
- goto end;
- }
-
- rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
-
- /* Check the proposed reservation, store it in hw's "next" field */
- ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, rsvp_nxt, &reqs);
-
- _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);
-
+ ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs);
if (ret) {
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
- _dpu_rm_release_rsvp(rm, rsvp_nxt);
+ _dpu_rm_release_reservation(rm, enc->base.id);
} else if (test_only) {
- /*
- * Normally, if test_only, test the reservation and then undo
- * However, if the user requests LOCK, then keep the reservation
- * made during the atomic_check phase.
- */
- DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
- rsvp_nxt->seq, rsvp_nxt->enc_id);
- _dpu_rm_release_rsvp(rm, rsvp_nxt);
- } else {
- _dpu_rm_release_rsvp(rm, rsvp_cur);
-
- _dpu_rm_commit_rsvp(rm, rsvp_nxt);
+ /* test_only: test the reservation and then undo */
+ DPU_DEBUG("test_only: discard test [enc: %d]\n",
+ enc->base.id);
+ _dpu_rm_release_reservation(rm, enc->base.id);
}
- _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);
-
end:
mutex_unlock(&rm->rm_lock);
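The net effect of the dpu_rm rewrite: the transactional rsvp/rsvp_nxt objects, their global list and debug sequence numbers collapse into a single enc_id tag per hardware block, with 0 meaning free. Reduced to a standalone sketch (illustrative only, not dpu code):

struct blk {
	uint32_t id;
	uint32_t enc_id;	/* owning encoder id, 0 == unreserved */
};

static bool reserved_by_other(const struct blk *b, uint32_t enc_id)
{
	return b->enc_id && b->enc_id != enc_id;
}

static void release_all(struct blk *blks, size_t n, uint32_t enc_id)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (blks[i].enc_id == enc_id)
			blks[i].enc_id = 0;
}

Because a half-completed reservation carries the same enc_id tag, the failure and test_only paths above can reuse the one release helper instead of the old two-phase rsvp commit.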
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index b8273bd23801..381611fc5877 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -22,22 +22,14 @@
/**
* struct dpu_rm - DPU dynamic hardware resource manager
- * @dev: device handle for event logging purposes
- * @rsvps: list of hardware reservations by each crtc->encoder->connector
* @hw_blks: array of lists of hardware resources present in the system, one
* list per type of hardware block
- * @hw_mdp: hardware object for mdp_top
* @lm_max_width: cached layer mixer maximum width
- * @rsvp_next_seq: sequence number for next reservation for debugging purposes
* @rm_lock: resource manager mutex
*/
struct dpu_rm {
- struct drm_device *dev;
- struct list_head rsvps;
struct list_head hw_blks[DPU_HW_BLK_MAX];
- struct dpu_hw_mdp *hw_mdp;
uint32_t lm_max_width;
- uint32_t rsvp_next_seq;
struct mutex rm_lock;
};
@@ -67,13 +59,11 @@ struct dpu_rm_hw_iter {
* @rm: DPU Resource Manager handle
* @cat: Pointer to hardware catalog
* @mmio: mapped register io address of MDP
- * @dev: device handle for event logging purposes
* @Return: 0 on Success otherwise -ERROR
*/
int dpu_rm_init(struct dpu_rm *rm,
struct dpu_mdss_cfg *cat,
- void __iomem *mmio,
- struct drm_device *dev);
+ void __iomem *mmio);
/**
* dpu_rm_destroy - Free all memory allocated by dpu_rm_init
@@ -112,14 +102,6 @@ int dpu_rm_reserve(struct dpu_rm *rm,
void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
/**
- * dpu_rm_get_mdp - Retrieve HW block for MDP TOP.
- * This is never reserved, and is usable by any display.
- * @rm: DPU Resource Manager handle
- * @Return: Pointer to hw block or NULL
- */
-struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm);
-
-/**
* dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
* using dpu_rm_get_hw
* @iter: iter object to initialize
@@ -144,12 +126,4 @@ void dpu_rm_init_hw_iter(
* @Return: true on match found, false on no match found
*/
bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
-
-/**
- * dpu_rm_check_property_topctl - validate property bitmask before it is set
- * @val: user's proposed topology control bitmask
- * @Return: 0 on success or error
- */
-int dpu_rm_check_property_topctl(uint64_t val);
-
#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index c78b521ceda1..8bb46090bd16 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -831,48 +831,42 @@ TRACE_EVENT(dpu_plane_disable,
);
DECLARE_EVENT_CLASS(dpu_rm_iter_template,
- TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
- TP_ARGS(id, type, enc_id),
+ TP_PROTO(uint32_t id, uint32_t enc_id),
+ TP_ARGS(id, enc_id),
TP_STRUCT__entry(
__field( uint32_t, id )
- __field( enum dpu_hw_blk_type, type )
__field( uint32_t, enc_id )
),
TP_fast_assign(
__entry->id = id;
- __entry->type = type;
__entry->enc_id = enc_id;
),
- TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type,
- __entry->enc_id)
+ TP_printk("id:%d enc_id:%u", __entry->id, __entry->enc_id)
);
DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
- TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
- TP_ARGS(id, type, enc_id)
+ TP_PROTO(uint32_t id, uint32_t enc_id),
+ TP_ARGS(id, enc_id)
);
DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
- TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
- TP_ARGS(id, type, enc_id)
+ TP_PROTO(uint32_t id, uint32_t enc_id),
+ TP_ARGS(id, enc_id)
);
TRACE_EVENT(dpu_rm_reserve_lms,
- TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id,
- uint32_t pp_id),
- TP_ARGS(id, type, enc_id, pp_id),
+ TP_PROTO(uint32_t id, uint32_t enc_id, uint32_t pp_id),
+ TP_ARGS(id, enc_id, pp_id),
TP_STRUCT__entry(
__field( uint32_t, id )
- __field( enum dpu_hw_blk_type, type )
__field( uint32_t, enc_id )
__field( uint32_t, pp_id )
),
TP_fast_assign(
__entry->id = id;
- __entry->type = type;
__entry->enc_id = enc_id;
__entry->pp_id = pp_id;
),
- TP_printk("id:%d type:%d enc_id:%u pp_id:%u", __entry->id,
- __entry->type, __entry->enc_id, __entry->pp_id)
+ TP_printk("id:%d enc_id:%u pp_id:%u", __entry->id,
+ __entry->enc_id, __entry->pp_id)
);
TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 8747fb32a106..0bdd93648761 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -207,62 +207,44 @@ u32 msm_readl(const void __iomem *addr)
return val;
}
-struct vblank_event {
- struct list_head node;
+struct msm_vblank_work {
+ struct work_struct work;
int crtc_id;
bool enable;
+ struct msm_drm_private *priv;
};
-static void vblank_ctrl_worker(struct kthread_work *work)
+static void vblank_ctrl_worker(struct work_struct *work)
{
- struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
- struct msm_vblank_ctrl, work);
- struct msm_drm_private *priv = container_of(vbl_ctrl,
- struct msm_drm_private, vblank_ctrl);
+ struct msm_vblank_work *vbl_work = container_of(work,
+ struct msm_vblank_work, work);
+ struct msm_drm_private *priv = vbl_work->priv;
struct msm_kms *kms = priv->kms;
- struct vblank_event *vbl_ev, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&vbl_ctrl->lock, flags);
- list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
- list_del(&vbl_ev->node);
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
-
- if (vbl_ev->enable)
- kms->funcs->enable_vblank(kms,
- priv->crtcs[vbl_ev->crtc_id]);
- else
- kms->funcs->disable_vblank(kms,
- priv->crtcs[vbl_ev->crtc_id]);
-
- kfree(vbl_ev);
- spin_lock_irqsave(&vbl_ctrl->lock, flags);
- }
+ if (vbl_work->enable)
+ kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
+ else
+ kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ kfree(vbl_work);
}
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
int crtc_id, bool enable)
{
- struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
- struct vblank_event *vbl_ev;
- unsigned long flags;
+ struct msm_vblank_work *vbl_work;
- vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
- if (!vbl_ev)
+ vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
+ if (!vbl_work)
return -ENOMEM;
- vbl_ev->crtc_id = crtc_id;
- vbl_ev->enable = enable;
+ INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
- spin_lock_irqsave(&vbl_ctrl->lock, flags);
- list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ vbl_work->crtc_id = crtc_id;
+ vbl_work->enable = enable;
+ vbl_work->priv = priv;
- kthread_queue_work(&priv->disp_thread[crtc_id].worker,
- &vbl_ctrl->work);
+ queue_work(priv->wq, &vbl_work->work);
return 0;
}
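The conversion above trades one shared kthread_work plus a spinlock-protected event list for a self-contained work item per request: the handler owns its item and frees it, so no list or lock remains. The generic shape of that pattern, sketched with hypothetical foo_* names:

struct foo_req {
	struct work_struct work;
	int arg;
};

static void foo_req_worker(struct work_struct *work)
{
	struct foo_req *req = container_of(work, struct foo_req, work);

	/* ... act on req->arg ... */
	kfree(req);		/* each item manages its own lifetime */
}

static int foo_req_queue(struct workqueue_struct *wq, int arg)
{
	/* GFP_ATOMIC: the vblank path may be called from atomic context */
	struct foo_req *req = kzalloc(sizeof(*req), GFP_ATOMIC);

	if (!req)
		return -ENOMEM;

	INIT_WORK(&req->work, foo_req_worker);
	req->arg = arg;
	queue_work(wq, &req->work);
	return 0;
}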
@@ -274,31 +256,20 @@ static int msm_drm_uninit(struct device *dev)
struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_mdss *mdss = priv->mdss;
- struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
- struct vblank_event *vbl_ev, *tmp;
int i;
/* We must cancel and cleanup any pending vblank enable/disable
* work before drm_irq_uninstall() to avoid work re-enabling an
* irq after uninstall has disabled it.
*/
- kthread_flush_work(&vbl_ctrl->work);
- list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
- list_del(&vbl_ev->node);
- kfree(vbl_ev);
- }
- /* clean up display commit/event worker threads */
- for (i = 0; i < priv->num_crtcs; i++) {
- if (priv->disp_thread[i].thread) {
- kthread_flush_worker(&priv->disp_thread[i].worker);
- kthread_stop(priv->disp_thread[i].thread);
- priv->disp_thread[i].thread = NULL;
- }
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+ /* clean up event worker threads */
+ for (i = 0; i < priv->num_crtcs; i++) {
if (priv->event_thread[i].thread) {
- kthread_flush_worker(&priv->event_thread[i].worker);
- kthread_stop(priv->event_thread[i].thread);
+ kthread_destroy_worker(&priv->event_thread[i].worker);
priv->event_thread[i].thread = NULL;
}
}
@@ -323,9 +294,6 @@ static int msm_drm_uninit(struct device *dev)
drm_irq_uninstall(ddev);
pm_runtime_put_sync(dev);
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
-
if (kms && kms->funcs)
kms->funcs->destroy(kms);
@@ -490,9 +458,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv->wq = alloc_ordered_workqueue("msm", 0);
INIT_LIST_HEAD(&priv->inactive_list);
- INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
- kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
- spin_lock_init(&priv->vblank_ctrl.lock);
drm_mode_config_init(ddev);
@@ -554,27 +519,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
*/
param.sched_priority = 16;
for (i = 0; i < priv->num_crtcs; i++) {
-
- /* initialize display thread */
- priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
- kthread_init_worker(&priv->disp_thread[i].worker);
- priv->disp_thread[i].dev = ddev;
- priv->disp_thread[i].thread =
- kthread_run(kthread_worker_fn,
- &priv->disp_thread[i].worker,
- "crtc_commit:%d", priv->disp_thread[i].crtc_id);
- if (IS_ERR(priv->disp_thread[i].thread)) {
- DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
- priv->disp_thread[i].thread = NULL;
- goto err_msm_uninit;
- }
-
- ret = sched_setscheduler(priv->disp_thread[i].thread,
- SCHED_FIFO, &param);
- if (ret)
- dev_warn(dev, "disp_thread set priority failed: %d\n",
- ret);
-
/* initialize event thread */
priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
kthread_init_worker(&priv->event_thread[i].worker);
@@ -589,13 +533,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
goto err_msm_uninit;
}
- /**
- * event thread should also run at same priority as disp_thread
- * because it is handling frame_done events. A lower priority
- * event thread and higher priority disp_thread can causes
- * frame_pending counters beyond 2. This can lead to commit
- * failure at crtc commit level.
- */
ret = sched_setscheduler(priv->event_thread[i].thread,
SCHED_FIFO, &param);
if (ret)
@@ -914,8 +851,12 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
ret = -EINVAL;
break;
}
- ret = copy_from_user(msm_obj->name,
- u64_to_user_ptr(args->value), args->len);
+ if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
+ args->len)) {
+ msm_obj->name[0] = '\0';
+ ret = -EFAULT;
+ break;
+ }
msm_obj->name[args->len] = '\0';
for (i = 0; i < args->len; i++) {
if (!isprint(msm_obj->name[i])) {
@@ -931,8 +872,9 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
}
args->len = strlen(msm_obj->name);
if (args->value) {
- ret = copy_to_user(u64_to_user_ptr(args->value),
- msm_obj->name, args->len);
+ if (copy_to_user(u64_to_user_ptr(args->value),
+ msm_obj->name, args->len))
+ ret = -EFAULT;
}
break;
}
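Both gem_info hunks fix the same class of bug: copy_from_user() and copy_to_user() return the number of bytes left uncopied, not an errno, so storing that count in ret would hand a positive value back to userspace. The correct translation, as a minimal sketch:

static int foo_copy_in(void *dst, const void __user *src, size_t len)
{
	if (copy_from_user(dst, src, len))
		return -EFAULT;	/* any nonzero remainder means failure */
	return 0;
}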
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index bc18c69ccf7d..c56dade2c1dc 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
PLANE_PROP_MAX_NUM
};
-struct msm_vblank_ctrl {
- struct kthread_work work;
- struct list_head event_list;
- spinlock_t lock;
-};
-
#define MSM_GPU_MAX_RINGS 4
#define MAX_H_TILES_PER_DISPLAY 2
@@ -126,7 +120,7 @@ struct msm_display_topology {
/**
* struct msm_display_info - defines display properties
- * @intf_type: DRM_MODE_CONNECTOR_ display type
+ * @intf_type: DRM_MODE_ENCODER_ type
* @capabilities: Bitmask of display flags
* @num_of_h_tiles: Number of horizontal tiles in case of split interface
* @h_tile_instance: Controller instance used per tile. Number of elements is
@@ -199,7 +193,6 @@ struct msm_drm_private {
unsigned int num_crtcs;
struct drm_crtc *crtcs[MAX_CRTCS];
- struct msm_drm_thread disp_thread[MAX_CRTCS];
struct msm_drm_thread event_thread[MAX_CRTCS];
unsigned int num_encoders;
@@ -228,7 +221,6 @@ struct msm_drm_private {
struct notifier_block vmap_notifier;
struct shrinker shrinker;
- struct msm_vblank_ctrl vblank_ctrl;
struct drm_atomic_state *pm_state;
};
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index ca17086f72c9..6241986bab51 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -19,6 +19,7 @@
#define __MSM_GPU_H__
#include <linux/clk.h>
+#include <linux/interconnect.h>
#include <linux/regulator/consumer.h>
#include "msm_drv.h"
@@ -118,6 +119,8 @@ struct msm_gpu {
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
uint32_t fast_rate;
+ struct icc_path *icc_path;
+
/* Hang and Inactivity Detection:
*/
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
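struct msm_gpu gains an interconnect path handle here; no user appears in this hunk, but the interconnect API it belongs to is small. A sketch of typical usage under that assumption (not the actual msm call sites):

static int foo_gpu_icc_init(struct msm_gpu *gpu, struct device *dev)
{
	/* first (unnamed) interconnect path from the device tree */
	gpu->icc_path = of_icc_get(dev, NULL);
	if (IS_ERR(gpu->icc_path))
		return PTR_ERR(gpu->icc_path);

	/* vote average/peak memory bandwidth, in kBps */
	return icc_set_bw(gpu->icc_path, 0, kBps_to_icc(1000000));
}

The matching teardown is icc_put(gpu->icc_path).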
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 61fc27d30ff2..40c47d6a7d78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -259,7 +259,7 @@ static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
.finalize_and_map = nouveau_dmem_fault_finalize_and_map,
};
-static int
+static vm_fault_t
nouveau_dmem_fault(struct hmm_devmem *devmem,
struct vm_area_struct *vma,
unsigned long addr,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index bb81e310eb6d..578d867a81d5 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -79,6 +79,10 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto free_dev;
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl");
+ if (ret)
+ goto disable_pci;
+
ret = qxl_device_init(qdev, &qxl_driver, pdev);
if (ret)
goto disable_pci;
@@ -94,7 +98,6 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto modeset_cleanup;
- drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl");
drm_fbdev_generic_setup(&qdev->ddev, 32);
return 0;
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index a97294ac96d5..a12439266bb0 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -4869,10 +4869,12 @@ static void ci_request_link_speed_change_before_state_change(struct radeon_devic
pi->force_pcie_gen = RADEON_PCIE_GEN2;
if (current_link_speed == RADEON_PCIE_GEN2)
break;
+ /* fall through */
case RADEON_PCIE_GEN2:
if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
break;
#endif
+ /* fall through */
default:
pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
break;
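The /* fall through */ comments added here and in si_dpm.c below mark the missing break as intentional, which is what GCC's -Wimplicit-fallthrough pattern-matches. Reduced to its shape (hypothetical foo_* helpers):

static void foo_pick_pcie_gen(int want)
{
	switch (want) {
	case 3:
		if (foo_request_gen3() == 0)
			break;
		/* fall through */
	case 2:
		if (foo_request_gen2() == 0)
			break;
		/* fall through */
	default:
		foo_keep_current_gen();
		break;
	}
}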
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index f471537c852f..1e14c6921454 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -1299,6 +1299,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ break;
case CB_TARGET_MASK:
track->cb_target_mask = radeon_get_ib_value(p, idx);
track->cb_dirty = true;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dec1e081f529..6a8fb6fd183c 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (radeon_is_px(dev)) {
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 0a785ef0ab66..c9f6cb77e857 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -5762,10 +5762,12 @@ static void si_request_link_speed_change_before_state_change(struct radeon_devic
si_pi->force_pcie_gen = RADEON_PCIE_GEN2;
if (current_link_speed == RADEON_PCIE_GEN2)
break;
+ /* fall through */
case RADEON_PCIE_GEN2:
if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
break;
#endif
+ /* fall through */
default:
si_pi->force_pcie_gen = si_get_current_pcie_speed(rdev);
break;
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index e2942c9a11a7..35ddbec1375a 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -52,12 +52,12 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
{
int i;
- if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
+ if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
return -EINVAL;
memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
- entity->rq = rq_list[0];
+ entity->rq = NULL;
entity->guilty = guilty;
entity->num_rq_list = num_rq_list;
entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
@@ -67,6 +67,10 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
for (i = 0; i < num_rq_list; ++i)
entity->rq_list[i] = rq_list[i];
+
+ if (num_rq_list)
+ entity->rq = rq_list[0];
+
entity->last_scheduled = NULL;
spin_lock_init(&entity->rq_lock);
@@ -165,6 +169,9 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
struct task_struct *last_user;
long ret = timeout;
+ if (!entity->rq)
+ return 0;
+
sched = entity->rq->sched;
/**
* The client will not queue more IBs during this fini, consume existing
@@ -264,20 +271,24 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
*/
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched;
+ struct drm_gpu_scheduler *sched = NULL;
- sched = entity->rq->sched;
- drm_sched_rq_remove_entity(entity->rq, entity);
+ if (entity->rq) {
+ sched = entity->rq->sched;
+ drm_sched_rq_remove_entity(entity->rq, entity);
+ }
/* Consumption of existing IBs wasn't completed. Forcefully
* remove them here.
*/
if (spsc_queue_peek(&entity->job_queue)) {
- /* Park the kernel for a moment to make sure it isn't processing
- * our enity.
- */
- kthread_park(sched->thread);
- kthread_unpark(sched->thread);
+ if (sched) {
+			/* Park the kernel thread for a moment to make sure it
+			 * isn't processing our entity.
+ */
+ kthread_park(sched->thread);
+ kthread_unpark(sched->thread);
+ }
if (entity->dependency) {
dma_fence_remove_callback(entity->dependency,
&entity->cb);
@@ -362,9 +373,11 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
for (i = 0; i < entity->num_rq_list; ++i)
drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);
- drm_sched_rq_remove_entity(entity->rq, entity);
- drm_sched_entity_set_rq_priority(&entity->rq, priority);
- drm_sched_rq_add_entity(entity->rq, entity);
+ if (entity->rq) {
+ drm_sched_rq_remove_entity(entity->rq, entity);
+ drm_sched_entity_set_rq_priority(&entity->rq, priority);
+ drm_sched_rq_add_entity(entity->rq, entity);
+ }
spin_unlock(&entity->rq_lock);
}
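After this change an entity may be initialized with zero runqueues, leaving entity->rq NULL until one is assigned later; flush, fini and set_priority all gain guards for that state above. From a caller's side, sketched against the signature shown in the first hunk:

static int foo_make_idle_entity(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rqs[1];	/* contents unused when num == 0 */

	/* num_rq_list == 0 is now legal; rq_list must still be non-NULL */
	return drm_sched_entity_init(entity, rqs, 0, NULL);
}

On success entity->rq stays NULL and drm_sched_entity_flush() simply returns 0.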
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index d5a23295dd80..bb7b58407039 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
out:
- drm_gem_object_put(&gobj->base);
+ drm_gem_object_put_unlocked(&gobj->base);
unlock:
mutex_unlock(&udl->gem_lock);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 31786b200afc..a3357ff7540d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -311,7 +311,13 @@ static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
- return sg_page_iter_dma_address(&viter->iter);
+ /*
+ * FIXME: This driver wrongly mixes DMA and CPU SG list iteration and
+ * needs revision. See
+ * https://lore.kernel.org/lkml/20190104223531.GA1705@ziepe.ca/
+ */
+ return sg_page_iter_dma_address(
+ container_of(&viter->iter, struct sg_dma_page_iter, base));
}
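Background for the container_of(): the scatterlist API split its iterators, and sg_page_iter_dma_address() now takes a struct sg_dma_page_iter that embeds the CPU iterator as its .base member. The clean usage the FIXME points toward looks roughly like this sketch:

static void foo_walk_dma(struct sg_table *sgt)
{
	struct sg_dma_page_iter diter;

	for_each_sg_dma_page(sgt->sgl, &diter, sgt->nents, 0) {
		dma_addr_t daddr = sg_page_iter_dma_address(&diter);

		/* ... hand daddr to the device ... */
		(void)daddr;
	}
}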
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 163fadb8a33a..d047a6867c59 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -277,9 +277,10 @@ void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off)
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_uv_offset);
-void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride)
+void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride,
+ u32 pixelformat)
{
- u32 ilo, sly;
+ u32 ilo, sly, sluv;
if (stride < 0) {
stride = -stride;
@@ -290,9 +291,30 @@ void ipu_cpmem_interlaced_scan(struct ipuv3_channel *ch, int stride)
sly = (stride * 2) - 1;
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ sluv = stride / 2 - 1;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ sluv = stride - 1;
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ sluv = stride - 1;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ sluv = stride * 2 - 1;
+ break;
+ default:
+ sluv = 0;
+ break;
+ }
+
ipu_ch_param_write_field(ch, IPU_FIELD_SO, 1);
ipu_ch_param_write_field(ch, IPU_FIELD_ILO, ilo);
ipu_ch_param_write_field(ch, IPU_FIELD_SLY, sly);
+ if (sluv)
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, sluv);
};
EXPORT_SYMBOL_GPL(ipu_cpmem_interlaced_scan);
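Worked numbers for the new SLUV field, assuming a 720-byte luma stride (the minus-one encoding and line doubling follow the register writes above):

	sly  = 720 * 2 - 1 = 1439	/* luma: interlaced scan skips every other line */
	sluv = 720 / 2 - 1 =  359	/* YUV420/YVU420: planar 4:2:0 chroma is half the stride */
	sluv = 720     - 1 =  719	/* NV12, YUV422P */
	sluv = 720 * 2 - 1 = 1439	/* NV16: 4:2:2 interleaved chroma doubles it */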
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index aa0e30a2ba18..d1e575571a8d 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -325,12 +325,21 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code,
return 0;
}
+/* translate alternate field mode based on given standard */
+static inline enum v4l2_field
+ipu_csi_translate_field(enum v4l2_field field, v4l2_std_id std)
+{
+ return (field != V4L2_FIELD_ALTERNATE) ? field :
+ ((std & V4L2_STD_525_60) ?
+ V4L2_FIELD_SEQ_BT : V4L2_FIELD_SEQ_TB);
+}
+
/*
* Fill a CSI bus config struct from mbus_config and mbus_framefmt.
*/
static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
- struct v4l2_mbus_config *mbus_cfg,
- struct v4l2_mbus_framefmt *mbus_fmt)
+ const struct v4l2_mbus_config *mbus_cfg,
+ const struct v4l2_mbus_framefmt *mbus_fmt)
{
int ret;
@@ -374,22 +383,76 @@ static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
return 0;
}
+static int
+ipu_csi_set_bt_interlaced_codes(struct ipu_csi *csi,
+ const struct v4l2_mbus_framefmt *infmt,
+ const struct v4l2_mbus_framefmt *outfmt,
+ v4l2_std_id std)
+{
+ enum v4l2_field infield, outfield;
+ bool swap_fields;
+
+ /* get translated field type of input and output */
+ infield = ipu_csi_translate_field(infmt->field, std);
+ outfield = ipu_csi_translate_field(outfmt->field, std);
+
+ /*
+ * Write the H-V-F codes the CSI will match against the
+ * incoming data for start/end of active and blanking
+ * field intervals. If input and output field types are
+ * sequential but not the same (one is SEQ_BT and the other
+ * is SEQ_TB), swap the F-bit so that the CSI will capture
+ * field 1 lines before field 0 lines.
+ */
+ swap_fields = (V4L2_FIELD_IS_SEQUENTIAL(infield) &&
+ V4L2_FIELD_IS_SEQUENTIAL(outfield) &&
+ infield != outfield);
+
+ if (!swap_fields) {
+ /*
+ * Field0BlankEnd = 110, Field0BlankStart = 010
+ * Field0ActiveEnd = 100, Field0ActiveStart = 000
+ * Field1BlankEnd = 111, Field1BlankStart = 011
+ * Field1ActiveEnd = 101, Field1ActiveStart = 001
+ */
+ ipu_csi_write(csi, 0x40596 | CSI_CCIR_ERR_DET_EN,
+ CSI_CCIR_CODE_1);
+ ipu_csi_write(csi, 0xD07DF, CSI_CCIR_CODE_2);
+ } else {
+ dev_dbg(csi->ipu->dev, "capture field swap\n");
+
+ /* same as above but with F-bit inverted */
+ ipu_csi_write(csi, 0xD07DF | CSI_CCIR_ERR_DET_EN,
+ CSI_CCIR_CODE_1);
+ ipu_csi_write(csi, 0x40596, CSI_CCIR_CODE_2);
+ }
+
+ ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
+
+ return 0;
+}
+
int ipu_csi_init_interface(struct ipu_csi *csi,
- struct v4l2_mbus_config *mbus_cfg,
- struct v4l2_mbus_framefmt *mbus_fmt)
+ const struct v4l2_mbus_config *mbus_cfg,
+ const struct v4l2_mbus_framefmt *infmt,
+ const struct v4l2_mbus_framefmt *outfmt)
{
struct ipu_csi_bus_config cfg;
unsigned long flags;
u32 width, height, data = 0;
+ v4l2_std_id std;
int ret;
- ret = fill_csi_bus_cfg(&cfg, mbus_cfg, mbus_fmt);
+ ret = fill_csi_bus_cfg(&cfg, mbus_cfg, infmt);
if (ret < 0)
return ret;
/* set default sensor frame width and height */
- width = mbus_fmt->width;
- height = mbus_fmt->height;
+ width = infmt->width;
+ height = infmt->height;
+ if (infmt->field == V4L2_FIELD_ALTERNATE)
+ height *= 2;
/* Set the CSI_SENS_CONF register remaining fields */
data |= cfg.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT |
@@ -416,42 +479,22 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
break;
case IPU_CSI_CLK_MODE_CCIR656_INTERLACED:
- if (mbus_fmt->width == 720 && mbus_fmt->height == 576) {
- /*
- * PAL case
- *
- * Field0BlankEnd = 0x6, Field0BlankStart = 0x2,
- * Field0ActiveEnd = 0x4, Field0ActiveStart = 0
- * Field1BlankEnd = 0x7, Field1BlankStart = 0x3,
- * Field1ActiveEnd = 0x5, Field1ActiveStart = 0x1
- */
- height = 625; /* framelines for PAL */
-
- ipu_csi_write(csi, 0x40596 | CSI_CCIR_ERR_DET_EN,
- CSI_CCIR_CODE_1);
- ipu_csi_write(csi, 0xD07DF, CSI_CCIR_CODE_2);
- ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
- } else if (mbus_fmt->width == 720 && mbus_fmt->height == 480) {
- /*
- * NTSC case
- *
- * Field0BlankEnd = 0x7, Field0BlankStart = 0x3,
- * Field0ActiveEnd = 0x5, Field0ActiveStart = 0x1
- * Field1BlankEnd = 0x6, Field1BlankStart = 0x2,
- * Field1ActiveEnd = 0x4, Field1ActiveStart = 0
- */
- height = 525; /* framelines for NTSC */
-
- ipu_csi_write(csi, 0xD07DF | CSI_CCIR_ERR_DET_EN,
- CSI_CCIR_CODE_1);
- ipu_csi_write(csi, 0x40596, CSI_CCIR_CODE_2);
- ipu_csi_write(csi, 0xFF0000, CSI_CCIR_CODE_3);
+ if (width == 720 && height == 480) {
+ std = V4L2_STD_NTSC;
+ height = 525;
+ } else if (width == 720 && height == 576) {
+ std = V4L2_STD_PAL;
+ height = 625;
} else {
dev_err(csi->ipu->dev,
- "Unsupported CCIR656 interlaced video mode\n");
- spin_unlock_irqrestore(&csi->lock, flags);
- return -EINVAL;
+ "Unsupported interlaced video mode\n");
+ ret = -EINVAL;
+ goto out_unlock;
}
+
+ ret = ipu_csi_set_bt_interlaced_codes(csi, infmt, outfmt, std);
+ if (ret)
+ goto out_unlock;
break;
case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR:
case IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR:
@@ -476,9 +519,10 @@ int ipu_csi_init_interface(struct ipu_csi *csi,
dev_dbg(csi->ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n",
ipu_csi_read(csi, CSI_ACT_FRM_SIZE));
+out_unlock:
spin_unlock_irqrestore(&csi->lock, flags);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(ipu_csi_init_interface);
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index 4a28f3fbb0a2..6cacfd61d984 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -265,6 +265,12 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET);
}
+bool ipu_pre_update_pending(struct ipu_pre *pre)
+{
+ return !!(readl_relaxed(pre->regs + IPU_PRE_CTRL) &
+ IPU_PRE_CTRL_SDW_UPDATE);
+}
+
u32 ipu_pre_get_baddr(struct ipu_pre *pre)
{
return (u32)pre->buffer_paddr;
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index 38a3a9764e49..94b76badf677 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -347,6 +347,22 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
}
EXPORT_SYMBOL_GPL(ipu_prg_channel_configure);
+bool ipu_prg_channel_configure_pending(struct ipuv3_channel *ipu_chan)
+{
+ int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
+ struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
+ struct ipu_prg_channel *chan;
+
+ if (prg_chan < 0)
+ return false;
+
+ chan = &prg->chan[prg_chan];
+ WARN_ON(!chan->enabled);
+
+ return ipu_pre_update_pending(prg->pres[chan->used_pre]);
+}
+EXPORT_SYMBOL_GPL(ipu_prg_channel_configure_pending);
+
static int ipu_prg_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h
index d6beee99b6b8..38622e835e95 100644
--- a/drivers/gpu/ipu-v3/ipu-prv.h
+++ b/drivers/gpu/ipu-v3/ipu-prv.h
@@ -272,6 +272,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
unsigned int height, unsigned int stride, u32 format,
uint64_t modifier, unsigned int bufaddr);
void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr);
+bool ipu_pre_update_pending(struct ipu_pre *pre);
struct ipu_prg *ipu_prg_lookup_by_phandle(struct device *dev, const char *name,
int ipu_id);
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index dc8e039bfab5..f2f3ef8af271 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -48,6 +48,8 @@
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/screen_info.h>
+#include <linux/vt.h>
+#include <linux/console.h>
#include <linux/uaccess.h>
@@ -168,6 +170,53 @@ void vga_set_default_device(struct pci_dev *pdev)
vga_default = pci_dev_get(pdev);
}
+/**
+ * vga_remove_vgacon - deactivate vga console
+ * @pdev: pci device
+ *
+ * Unbind and unregister vgacon in case @pdev is the default vga
+ * device. Can be called by gpu drivers on initialization to make
+ * sure vga register access done by vgacon will not disturb the
+ * device.
+ */
+#if !defined(CONFIG_VGA_CONSOLE)
+int vga_remove_vgacon(struct pci_dev *pdev)
+{
+ return 0;
+}
+#elif !defined(CONFIG_DUMMY_CONSOLE)
+int vga_remove_vgacon(struct pci_dev *pdev)
+{
+ return -ENODEV;
+}
+#else
+int vga_remove_vgacon(struct pci_dev *pdev)
+{
+ int ret = 0;
+
+ if (pdev != vga_default)
+ return 0;
+ vgaarb_info(&pdev->dev, "deactivate vga console\n");
+
+ console_lock();
+ if (con_is_bound(&vga_con))
+ ret = do_take_over_console(&dummy_con, 0,
+ MAX_NR_CONSOLES - 1, 1);
+ if (ret == 0) {
+ ret = do_unregister_con_driver(&vga_con);
+
+ /* Ignore "already unregistered". */
+ if (ret == -ENODEV)
+ ret = 0;
+ }
+ console_unlock();
+
+ return ret;
+}
+#endif
+EXPORT_SYMBOL(vga_remove_vgacon);
+
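A typical call site for the new helper, sketched with a hypothetical foo_ probe: early in a GPU driver's PCI probe, after enabling the device but before touching VGA resources.

static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* no-op unless pdev is the default VGA device */
	ret = vga_remove_vgacon(pdev);
	if (ret)
		return ret;

	/* ... normal device init ... */
	return 0;
}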
static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
{
if (vgadev->irq_set_state)