author     Maarten Lankhorst <maarten.lankhorst@linux.intel.com>  2023-03-30 08:53:12 +0200
committer  Maarten Lankhorst <maarten.lankhorst@linux.intel.com>  2023-03-30 08:53:12 +0200
commit     8ba264f418f734aade3a77086bb1d51d0e2723ce (patch)
tree       ca6749e28544c393305b8606c0230ac50126041a /drivers/gpu/drm/amd/amdgpu
parent     fbb3b3500f76ec8b741bd2d0e761ca3e856ad924 (diff)
parent     82bbec189ab34873688484cd14189a5392946fbb (diff)
download   linux-8ba264f418f734aade3a77086bb1d51d0e2723ce.tar.gz
Merge remote-tracking branch 'drm/drm-next' into drm-misc-next
Backmerge to get rc4.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
63 files changed, 2880 insertions, 643 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 5d1e28218020..12adca8c7819 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -3,6 +3,7 @@ config DRM_AMDGPU
 	tristate "AMD GPU"
 	depends on DRM && PCI && MMU
+	depends on !UML
 	select FW_LOADER
 	select DRM_DISPLAY_DP_HELPER
 	select DRM_DISPLAY_HDMI_HELPER
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 1d72cbc85348..5f9ac1bcb6bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
 	amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \
 	amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \
-	amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o \
+	amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o amdgpu_mmhub.o amdgpu_hdp.o \
 	amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
 	amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
 	amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
@@ -148,6 +148,7 @@ amdgpu-y += \
 	sdma_v3_0.o \
 	sdma_v4_0.o \
 	sdma_v4_4.o \
+	sdma_v4_4_2.o \
 	sdma_v5_0.o \
 	sdma_v5_2.o \
 	sdma_v6_0.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index dda88090f044..8cf2cc50b3de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -50,7 +50,6 @@
 #include <linux/hashtable.h>
 #include <linux/dma-fence.h>
 #include <linux/pci.h>
-#include <linux/aer.h>
 
 #include <drm/ttm/ttm_bo.h>
 #include <drm/ttm/ttm_placement.h>
@@ -1005,7 +1004,6 @@ struct amdgpu_device {
 	bool				in_runpm;
 	bool				has_pr3;
 
-	bool				pm_sysfs_en;
 	bool				ucode_sysfs_en;
 	bool				psp_sysfs_en;
@@ -1095,18 +1093,14 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
 
 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
-				u32 pcie_index, u32 pcie_data,
 				u32 reg_addr);
 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
-				  u32 pcie_index, u32 pcie_data,
 				  u32 reg_addr);
 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
-				 u32 pcie_index, u32 pcie_data,
 				 u32 reg_addr, u32 reg_data);
 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
-				   u32 pcie_index, u32 pcie_data,
 				   u32 reg_addr, u64 reg_data);
-
+u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
@@ -1254,6 +1248,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
+bool amdgpu_device_aspm_support_quirk(void);
 
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
 				  u64 num_vis_bytes);
@@ -1373,10 +1368,12 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
 void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
 void amdgpu_acpi_detect(void);
 #else
 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
+static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
 static inline void amdgpu_acpi_detect(void) { }
 static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
 static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
@@ -1387,11 +1384,9 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
 
 #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
 bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
-bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
 #else
 static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
-static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
 static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index d4196fcb85a0..60b1857f469e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -971,6 +971,29 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
 	return true;
 }
 
+/**
+ * amdgpu_acpi_should_gpu_reset
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * returns true if should reset GPU, false if not
+ */
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
+{
+	if (adev->flags & AMD_IS_APU)
+		return false;
+
+	if (amdgpu_sriov_vf(adev))
+		return false;
+
+#if IS_ENABLED(CONFIG_SUSPEND)
+	return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
+#else
+	return true;
+#endif
+}
+
 /*
  * amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods
  *
@@ -1043,24 +1066,6 @@ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_acpi_should_gpu_reset
- *
- * @adev: amdgpu_device_pointer
- *
- * returns true if should reset GPU, false if not
- */
-bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
-{
-	if (adev->flags & AMD_IS_APU)
-		return false;
-
-	if (amdgpu_sriov_vf(adev))
-		return false;
-
-	return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
-}
-
-/**
  * amdgpu_acpi_is_s0ix_active
  *
  * @adev: amdgpu_device_pointer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 333780491867..01ba3589b60a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -308,6 +308,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
 				      uint64_t va, void *drm_priv,
 				      struct kgd_mem **mem, uint64_t *size,
 				      uint64_t *mmap_offset);
+int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
+				      struct dma_buf **dmabuf);
 int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
 				  struct tile_config *config);
 void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index d6320c836251..c87515210c4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -711,6 +711,21 @@ kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
 	}
 }
 
+static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
+{
+	if (!mem->dmabuf) {
+		struct dma_buf *ret = amdgpu_gem_prime_export(
+				&mem->bo->tbo.base,
+				mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+					DRM_RDWR : 0);
+		if (IS_ERR(ret))
+			return PTR_ERR(ret);
+		mem->dmabuf = ret;
+	}
+
+	return 0;
+}
+
 static int
 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
 		      struct amdgpu_bo **bo)
@@ -718,16 +733,9 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
 	struct drm_gem_object *gobj;
 	int ret;
 
-	if (!mem->dmabuf) {
-		mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
-			mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
-				DRM_RDWR : 0);
-		if (IS_ERR(mem->dmabuf)) {
-			ret = PTR_ERR(mem->dmabuf);
-			mem->dmabuf = NULL;
-			return ret;
-		}
-	}
+	ret = kfd_mem_export_dmabuf(mem);
+	if (ret)
+		return ret;
 
 	gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
 	if (IS_ERR(gobj))
@@ -1575,7 +1583,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
 {
 	uint64_t reserved_for_pt =
 		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
-	size_t available;
+	ssize_t available;
 
 	spin_lock(&kfd_mem_limit.mem_limit_lock);
 	available = adev->gmc.real_vram_size
@@ -1584,6 +1592,9 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
 		- reserved_for_pt;
 	spin_unlock(&kfd_mem_limit.mem_limit_lock);
 
+	if (available < 0)
+		available = 0;
+
 	return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN);
 }
 
@@ -2210,30 +2221,27 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
 	struct amdgpu_bo *bo;
 	int ret;
 
-	if (dma_buf->ops != &amdgpu_dmabuf_ops)
-		/* Can't handle non-graphics buffers */
-		return -EINVAL;
-
-	obj = dma_buf->priv;
-	if (drm_to_adev(obj->dev) != adev)
-		/* Can't handle buffers from other devices */
-		return -EINVAL;
+	obj = amdgpu_gem_prime_import(adev_to_drm(adev), dma_buf);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
 
 	bo = gem_to_amdgpu_bo(obj);
 	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
-				    AMDGPU_GEM_DOMAIN_GTT)))
+				    AMDGPU_GEM_DOMAIN_GTT))) {
 		/* Only VRAM and GTT BOs are supported */
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_put_obj;
+	}
 
 	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
-	if (!*mem)
-		return -ENOMEM;
+	if (!*mem) {
+		ret = -ENOMEM;
+		goto err_put_obj;
+	}
 
 	ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
-	if (ret) {
-		kfree(*mem);
-		return ret;
-	}
+	if (ret)
+		goto err_free_mem;
 
 	if (size)
 		*size = amdgpu_bo_size(bo);
@@ -2250,7 +2258,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
 		| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
 		| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
 
-	drm_gem_object_get(&bo->tbo.base);
+	get_dma_buf(dma_buf);
+	(*mem)->dmabuf = dma_buf;
 	(*mem)->bo = bo;
 	(*mem)->va = va;
 	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
@@ -2262,6 +2271,29 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
 	(*mem)->is_imported = true;
 
 	return 0;
+
+err_free_mem:
+	kfree(*mem);
+err_put_obj:
+	drm_gem_object_put(obj);
+	return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
+				      struct dma_buf **dma_buf)
+{
+	int ret;
+
+	mutex_lock(&mem->lock);
+	ret = kfd_mem_export_dmabuf(mem);
+	if (ret)
+		goto out;
+
+	get_dma_buf(mem->dmabuf);
+	*dma_buf = mem->dmabuf;
+out:
+	mutex_unlock(&mem->lock);
+	return ret;
 }
 
 /* Evict a userptr BO by stopping the queues if necessary
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c4a4e2fe6681..f5658359ff5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -35,6 +35,7 @@
 #include <linux/devcoredump.h>
 #include <generated/utsrelease.h>
 #include <linux/pci-p2pdma.h>
+#include <linux/apple-gmux.h>
 
 #include <drm/drm_aperture.h>
 #include <drm/drm_atomic_helper.h>
@@ -80,6 +81,10 @@
 
 #include <drm/drm_drv.h>
 
+#if IS_ENABLED(CONFIG_X86)
+#include <asm/intel-family.h>
+#endif
+
 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -675,20 +680,20 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
  * amdgpu_device_indirect_rreg - read an indirect register
  *
  * @adev: amdgpu_device pointer
- * @pcie_index: mmio register offset
- * @pcie_data: mmio register offset
  * @reg_addr: indirect register address to read from
  *
  * Returns the value of indirect register @reg_addr
  */
 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
-				u32 pcie_index, u32 pcie_data,
 				u32 reg_addr)
 {
-	unsigned long flags;
-	u32 r;
+	unsigned long flags, pcie_index, pcie_data;
 	void __iomem *pcie_index_offset;
 	void __iomem *pcie_data_offset;
+	u32 r;
+
+	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
@@ -706,20 +711,20 @@ u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
  * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
  *
  * @adev: amdgpu_device pointer
- * @pcie_index: mmio register offset
- * @pcie_data: mmio register offset
  * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
-				  u32 pcie_index, u32 pcie_data,
 				  u32 reg_addr)
 {
-	unsigned long flags;
-	u64 r;
+	unsigned long flags, pcie_index, pcie_data;
 	void __iomem *pcie_index_offset;
 	void __iomem *pcie_data_offset;
+	u64 r;
+
+	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
@@ -749,13 +754,15 @@ u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
  *
  */
 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
-				 u32 pcie_index, u32 pcie_data,
 				 u32 reg_addr, u32 reg_data)
 {
-	unsigned long flags;
+	unsigned long flags, pcie_index, pcie_data;
 	void __iomem *pcie_index_offset;
 	void __iomem *pcie_data_offset;
 
+	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
@@ -778,13 +785,15 @@ void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
  *
 */
 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
-				   u32 pcie_index, u32 pcie_data,
 				   u32 reg_addr, u64 reg_data)
 {
-	unsigned long flags;
+	unsigned long flags, pcie_index, pcie_data;
 	void __iomem *pcie_index_offset;
 	void __iomem *pcie_data_offset;
 
+	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
 	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
@@ -803,6 +812,18 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
 }
 
 /**
+ * amdgpu_device_get_rev_id - query device rev_id
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return device rev_id
+ */
+u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
+{
+	return adev->nbio.funcs->get_rev_id(adev);
+}
+
+/**
  * amdgpu_invalid_rreg - dummy reg read function
  *
  * @adev: amdgpu_device pointer
@@ -1356,6 +1377,17 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
 	return pcie_aspm_enabled(adev->pdev);
 }
 
+bool amdgpu_device_aspm_support_quirk(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
+#else
+	return true;
+#endif
+}
+
 /* if we get transitioned to only one device, take VGA back */
 /**
  * amdgpu_device_vga_set_decode - enable/disable vga decode
@@ -3773,8 +3805,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		}
 	}
 
-	pci_enable_pcie_error_reporting(adev->pdev);
-
 	/* Post card if necessary */
 	if (amdgpu_device_need_post(adev)) {
 		if (!adev->bios) {
@@ -3864,11 +3894,8 @@ fence_driver_init:
 	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
 
 	r = amdgpu_pm_sysfs_init(adev);
-	if (r) {
-		adev->pm_sysfs_en = false;
-		DRM_ERROR("registering pm debugfs failed (%d).\n", r);
-	} else
-		adev->pm_sysfs_en = true;
+	if (r)
+		DRM_ERROR("registering pm sysfs failed (%d).\n", r);
 
 	r = amdgpu_ucode_sysfs_init(adev);
 	if (r) {
@@ -3930,12 +3957,15 @@ fence_driver_init:
 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
 
-	if (amdgpu_device_supports_px(ddev)) {
-		px = true;
+	px = amdgpu_device_supports_px(ddev);
+
+	if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+		   apple_gmux_detect(NULL, NULL)))
 		vga_switcheroo_register_client(adev->pdev,
 					       &amdgpu_switcheroo_ops, px);
+
+	if (px)
 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
-	}
 
 	if (adev->gmc.xgmi.pending_reset)
 		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
@@ -4011,7 +4041,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 	if (adev->mman.initialized)
 		drain_workqueue(adev->mman.bdev.wq);
 
-	if (adev->pm_sysfs_en)
+	if (adev->pm.sysfs_initialized)
 		amdgpu_pm_sysfs_fini(adev);
 	if (adev->ucode_sysfs_en)
 		amdgpu_ucode_sysfs_fini(adev);
@@ -4039,6 +4069,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 {
 	int idx;
+	bool px;
 
 	amdgpu_fence_driver_sw_fini(adev);
 	amdgpu_device_ip_fini(adev);
@@ -4057,10 +4088,16 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 	kfree(adev->bios);
 	adev->bios = NULL;
-	if (amdgpu_device_supports_px(adev_to_drm(adev))) {
+
+	px = amdgpu_device_supports_px(adev_to_drm(adev));
+
+	if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+		   apple_gmux_detect(NULL, NULL)))
 		vga_switcheroo_unregister_client(adev->pdev);
+
+	if (px)
 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
-	}
+
 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
 		vga_client_unregister(adev->pdev);
@@ -4145,8 +4182,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
 		DRM_WARN("smart shift update failed\n");
 
-	drm_kms_helper_poll_disable(dev);
-
 	if (fbcon)
 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
@@ -4243,8 +4278,6 @@ exit:
 	if (fbcon)
 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
 
-	drm_kms_helper_poll_enable(dev);
-
 	amdgpu_ras_resume(adev);
 
 	if (adev->mode_info.num_crtc) {
@@ -5582,7 +5615,7 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
-	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
+	if (!amdgpu_device_supports_baco(dev))
 		return -ENOTSUPP;
 
 	if (ras && adev->ras_enabled &&
@@ -5598,7 +5631,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 	int ret = 0;
 
-	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
+	if (!amdgpu_device_supports_baco(dev))
 		return -ENOTSUPP;
 
 	ret = amdgpu_dpm_baco_exit(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index b719852daa07..77a8b05d3868 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -41,6 +41,7 @@
 #include "vega10_ih.h"
 #include "vega20_ih.h"
 #include "sdma_v4_0.h"
+#include "sdma_v4_4_2.h"
 #include "uvd_v7_0.h"
 #include "vce_v4_0.h"
 #include "vcn_v1_0.h"
@@ -543,6 +544,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
 	struct harvest_table *harvest_info;
 	u16 offset;
 	int i;
+	uint32_t umc_harvest_config = 0;
 
 	bhdr = (struct binary_header *)adev->mman.discovery_bin;
 	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
@@ -570,12 +572,17 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
 			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
 			break;
 		case UMC_HWID:
+			umc_harvest_config |=
+				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
 			(*umc_harvest_count)++;
 			break;
 		default:
 			break;
 		}
 	}
+
+	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
+				~umc_harvest_config;
 }
 
 /* ================================================== */
@@ -705,7 +712,7 @@ static void ip_hw_instance_release(struct kobject *kobj)
 	kfree(ip_hw_instance);
 }
 
-static struct kobj_type ip_hw_instance_ktype = {
+static const struct kobj_type ip_hw_instance_ktype = {
 	.release = ip_hw_instance_release,
 	.sysfs_ops = &ip_hw_instance_sysfs_ops,
 	.default_groups = ip_hw_instance_groups,
@@ -724,7 +731,7 @@ static void ip_hw_id_release(struct kobject *kobj)
 	kfree(ip_hw_id);
 }
 
-static struct kobj_type ip_hw_id_ktype = {
+static const struct kobj_type ip_hw_id_ktype = {
 	.release = ip_hw_id_release,
 	.sysfs_ops = &kobj_sysfs_ops,
 };
@@ -787,18 +794,18 @@ static const struct sysfs_ops ip_die_entry_sysfs_ops = {
 	.show = ip_die_entry_attr_show,
 };
 
-static struct kobj_type ip_die_entry_ktype = {
+static const struct kobj_type ip_die_entry_ktype = {
 	.release = ip_die_entry_release,
 	.sysfs_ops = &ip_die_entry_sysfs_ops,
 	.default_groups = ip_die_entry_groups,
 };
 
-static struct kobj_type die_kobj_ktype = {
+static const struct kobj_type die_kobj_ktype = {
 	.release = die_kobj_release,
 	.sysfs_ops = &kobj_sysfs_ops,
 };
 
-static struct kobj_type ip_discovery_ktype = {
+static const struct kobj_type ip_discovery_ktype = {
 	.release = ip_disc_release,
 	.sysfs_ops = &kobj_sysfs_ops,
 };
@@ -1156,8 +1163,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
 						AMDGPU_MAX_SDMA_INSTANCES);
 			}
 
-			if (le16_to_cpu(ip->hw_id) == UMC_HWID)
+			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
 				adev->gmc.num_umc++;
+				adev->umc.node_inst_num++;
+			}
 
 			for (k = 0; k < num_base_address; k++) {
 				/*
@@ -1583,6 +1592,7 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
 	case IP_VERSION(4, 2, 0):
 	case IP_VERSION(4, 2, 1):
 	case IP_VERSION(4, 4, 0):
+	case IP_VERSION(4, 4, 2):
 		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
 		break;
 	case IP_VERSION(5, 0, 0):
@@ -1641,6 +1651,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
 	case IP_VERSION(13, 0, 2):
 	case IP_VERSION(13, 0, 3):
 	case IP_VERSION(13, 0, 5):
+	case IP_VERSION(13, 0, 6):
 	case IP_VERSION(13, 0, 7):
 	case IP_VERSION(13, 0, 8):
 	case IP_VERSION(13, 0, 10):
@@ -1834,6 +1845,9 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
 	case IP_VERSION(4, 4, 0):
 		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
 		break;
+	case IP_VERSION(4, 4, 2):
+		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
+		break;
 	case IP_VERSION(5, 0, 0):
 	case IP_VERSION(5, 0, 1):
 	case IP_VERSION(5, 0, 2):
@@ -2296,6 +2310,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
 	case IP_VERSION(4, 2, 0):
 	case IP_VERSION(4, 2, 1):
 	case IP_VERSION(4, 4, 0):
+	case IP_VERSION(4, 4, 2):
 		adev->hdp.funcs = &hdp_v4_0_funcs;
 		break;
 	case IP_VERSION(5, 0, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 503f89a766c3..d60fe7eb5579 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1618,6 +1618,8 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
 	struct drm_connector_list_iter iter;
 	int r;
 
+	drm_kms_helper_poll_disable(dev);
+
 	/* turn off display hw */
 	drm_modeset_lock_all(dev);
 	drm_connector_list_iter_begin(dev, &iter);
@@ -1694,6 +1696,8 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev)
 
 	drm_modeset_unlock_all(dev);
 
+	drm_kms_helper_poll_enable(dev);
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f5ffca24def4..ba5def374368 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2467,7 +2467,10 @@ static int amdgpu_pmops_freeze(struct device *dev)
 	adev->in_s4 = false;
 	if (r)
 		return r;
-	return amdgpu_asic_reset(adev);
+
+	if (amdgpu_acpi_should_gpu_reset(adev))
+		return amdgpu_asic_reset(adev);
+	return 0;
 }
 
 static int amdgpu_pmops_thaw(struct device *dev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index 99a7855ab1bc..c57252f004e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -60,12 +60,13 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
 	struct amdgpu_fpriv *fpriv = file->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 
-	uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
+	struct amdgpu_mem_stats stats;
 	ktime_t usage[AMDGPU_HW_IP_NUM];
 	uint32_t bus, dev, fn, domain;
 	unsigned int hw_ip;
 	int ret;
 
+	memset(&stats, 0, sizeof(stats));
 	bus = adev->pdev->bus->number;
 	domain = pci_domain_nr(adev->pdev->bus);
 	dev = PCI_SLOT(adev->pdev->devfn);
@@ -75,7 +76,7 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
 	if (ret)
 		return;
 
-	amdgpu_vm_get_memory(vm, &vram_mem, &gtt_mem, &cpu_mem);
+	amdgpu_vm_get_memory(vm, &stats);
 	amdgpu_bo_unreserve(vm->root.bo);
 
 	amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage);
@@ -90,9 +91,22 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
 	seq_printf(m, "drm-driver:\t%s\n", file->minor->dev->driver->name);
 	seq_printf(m, "drm-pdev:\t%04x:%02x:%02x.%d\n", domain, bus, dev, fn);
 	seq_printf(m, "drm-client-id:\t%Lu\n", vm->immediate.fence_context);
-	seq_printf(m, "drm-memory-vram:\t%llu KiB\n", vram_mem/1024UL);
-	seq_printf(m, "drm-memory-gtt: \t%llu KiB\n", gtt_mem/1024UL);
-	seq_printf(m, "drm-memory-cpu: \t%llu KiB\n", cpu_mem/1024UL);
+	seq_printf(m, "drm-memory-vram:\t%llu KiB\n", stats.vram/1024UL);
+	seq_printf(m, "drm-memory-gtt: \t%llu KiB\n", stats.gtt/1024UL);
+	seq_printf(m, "drm-memory-cpu: \t%llu KiB\n", stats.cpu/1024UL);
+	seq_printf(m, "amd-memory-visible-vram:\t%llu KiB\n",
+		   stats.visible_vram/1024UL);
+	seq_printf(m, "amd-evicted-vram:\t%llu KiB\n",
+		   stats.evicted_vram/1024UL);
+	seq_printf(m, "amd-evicted-visible-vram:\t%llu KiB\n",
+		   stats.evicted_visible_vram/1024UL);
+	seq_printf(m, "amd-requested-vram:\t%llu KiB\n",
+		   stats.requested_vram/1024UL);
+	seq_printf(m, "amd-requested-visible-vram:\t%llu KiB\n",
+		   stats.requested_visible_vram/1024UL);
+	seq_printf(m, "amd-requested-gtt:\t%llu KiB\n",
+		   stats.requested_gtt/1024UL);
+
 	for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
 		if (!usage[hw_ip])
 			continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index faff4a3f96e6..f52d0ba91a77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -678,6 +678,15 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
 		ptr = &ring->fence_drv.fences[i];
 		old = rcu_dereference_protected(*ptr, 1);
 		if (old && old->ops == &amdgpu_job_fence_ops) {
+			struct amdgpu_job *job;
+
+			/* For non-scheduler bad job, i.e. failed ib test, we need to signal
+			 * it right here or we won't be able to track them in fence_drv
+			 * and they will remain unsignaled during sa_bo free.
+			 */
+			job = container_of(old, struct amdgpu_job, hw_fence);
+			if (!job->base.s_fence && !dma_fence_is_signaled(old))
+				dma_fence_signal(old);
 			RCU_INIT_POINTER(*ptr, NULL);
 			dma_fence_put(old);
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 35ed46b9249c..c50d59855011 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -725,7 +725,7 @@ int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
 
 	/* If not define special ras_late_init function, use gfx default ras_late_init */
 	if (!ras->ras_block.ras_late_init)
-		ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+		ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
 
 	/* If not defined special ras_cb function, use default ras_cb */
 	if (!ras->ras_block.ras_cb)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 12a6826caef4..655fc8bf936d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -447,13 +447,42 @@ void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
 	} while (fault->timestamp < tmp);
 }
 
-int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev)
+int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev)
 {
-	if (!adev->gmc.xgmi.connected_to_cpu) {
-		adev->gmc.xgmi.ras = &xgmi_ras;
-		amdgpu_ras_register_ras_block(adev, &adev->gmc.xgmi.ras->ras_block);
-		adev->gmc.xgmi.ras_if = &adev->gmc.xgmi.ras->ras_block.ras_comm;
-	}
+	int r;
+
+	/* umc ras block */
+	r = amdgpu_umc_ras_sw_init(adev);
+	if (r)
+		return r;
+
+	/* mmhub ras block */
+	r = amdgpu_mmhub_ras_sw_init(adev);
+	if (r)
+		return r;
+
+	/* hdp ras block */
+	r = amdgpu_hdp_ras_sw_init(adev);
+	if (r)
+		return r;
+
+	/* mca.x ras block */
+	r = amdgpu_mca_mp0_ras_sw_init(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_mca_mp1_ras_sw_init(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_mca_mpio_ras_sw_init(adev);
+	if (r)
+		return r;
+
+	/* xgmi ras block */
+	r = amdgpu_xgmi_ras_sw_init(adev);
+	if (r)
+		return r;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 0305b660cd17..232523e3e270 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -104,6 +104,8 @@ struct amdgpu_vmhub {
 	uint32_t	vm_cntx_cntl_vm_fault;
 	uint32_t	vm_l2_bank_select_reserved_cid2;
 
+	uint32_t	vm_contexts_disable;
+
 	const struct amdgpu_vmhub_funcs *vmhub_funcs;
 };
@@ -351,7 +353,7 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
 			      uint16_t pasid, uint64_t timestamp);
 void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
 				     uint16_t pasid);
-int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev);
+int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev);
 int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
 void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
 int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
new file mode 100644
index 000000000000..b6cf801939aa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
+{
+	int err;
+	struct amdgpu_hdp_ras *ras;
+
+	if (!adev->hdp.ras)
+		return 0;
+
+	ras = adev->hdp.ras;
+	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+	if (err) {
+		dev_err(adev->dev, "Failed to register hdp ras block!\n");
+		return err;
+	}
+
+	strcpy(ras->ras_block.ras_comm.name, "hdp");
+	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__HDP;
+	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+	adev->hdp.ras_if = &ras->ras_block.ras_comm;
+
+	/* hdp ras follows amdgpu_ras_block_late_init_default for late init */
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
index ac5c61d3de2b..7b8a6152dc8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -43,5 +43,5 @@ struct amdgpu_hdp {
 	struct amdgpu_hdp_ras	*ras;
 };
 
-int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
+int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev);
 #endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 6f81ed4fb0d9..479d9bcc99ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -236,19 +236,28 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
-void jpeg_set_ras_funcs(struct amdgpu_device *adev)
+int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
 {
+	int err;
+	struct amdgpu_jpeg_ras *ras;
+
 	if (!adev->jpeg.ras)
-		return;
+		return 0;
 
-	amdgpu_ras_register_ras_block(adev, &adev->jpeg.ras->ras_block);
+	ras = adev->jpeg.ras;
+	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+	if (err) {
+		dev_err(adev->dev, "Failed to register jpeg ras block!\n");
+		return err;
+	}
 
-	strcpy(adev->jpeg.ras->ras_block.ras_comm.name, "jpeg");
-	adev->jpeg.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
-	adev->jpeg.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
-	adev->jpeg.ras_if = &adev->jpeg.ras->ras_block.ras_comm;
+	strcpy(ras->ras_block.ras_comm.name, "jpeg");
+	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
+	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
+	adev->jpeg.ras_if = &ras->ras_block.ras_comm;
-	/* If don't define special ras_late_init function, use default ras_late_init */
-	if (!adev->jpeg.ras->ras_block.ras_late_init)
-		adev->jpeg.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+	if (!ras->ras_block.ras_late_init)
+		ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+
+	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index e8ca3e32ad52..0ca76f0f23e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -72,6 +72,6 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
 				   struct amdgpu_irq_src *source,
 				   struct amdgpu_iv_entry *entry);
-void jpeg_set_ras_funcs(struct amdgpu_device *adev);
+int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
 
 #endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index 51c2a82e2fa4..8d9ff9e151de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -70,3 +70,75 @@ void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
 
 	amdgpu_mca_reset_error_count(adev, mc_status_addr);
 }
+
+int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev)
+{
+	int err;
+	struct amdgpu_mca_ras_block *ras;
+
+	if (!adev->mca.mp0.ras)
+		return 0;
+
+	ras = adev->mca.mp0.ras;
+
+	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+	if (err) {
+		dev_err(adev->dev, "Failed to register mca.mp0 ras block!\n");
+		return err;
+	}
+
+	strcpy(ras->ras_block.ras_comm.name, "mca.mp0");
+	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
+	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+	adev->mca.mp0.ras_if = &ras->ras_block.ras_comm;
+
+	return 0;
+}
+
+int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev)
+{
+	int err;
+	struct amdgpu_mca_ras_block *ras;
+
+	if (!adev->mca.mp1.ras)
+		return 0;
+
+	ras = adev->mca.mp1.ras;
+
+	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+	if (err) {
+		dev_err(adev->dev, "Failed to register mca.mp1 ras block!\n");
+		return err;
+	}
+
+	strcpy(ras->ras_block.ras_comm.name, "mca.mp1");
+	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
+	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+	adev->mca.mp1.ras_if = &ras->ras_block.ras_comm;
+
+	return 0;
+}
+
+int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev)
+{
+	int err;
+	struct amdgpu_mca_ras_block *ras;
+
+	if (!adev->mca.mpio.ras)
+		return 0;
+
+	ras = adev->mca.mpio.ras;
+
+	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+	if (err) {
+		dev_err(adev->dev, "Failed to register mca.mpio ras block!\n");
+		return err;
+	}
+
+	strcpy(ras->ras_block.ras_comm.name, "mca.mpio");
+	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
+	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+	adev->mca.mpio.ras_if = &ras->ras_block.ras_comm;
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
index 7ce16d16e34b..997a073e2409 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -30,12 +30,7 @@ struct amdgpu_mca_ras {
 	struct amdgpu_mca_ras_block *ras;
 };
 
-struct amdgpu_mca_funcs {
-	void (*init)(struct amdgpu_device *adev);
-};
-
 struct amdgpu_mca {
-	const struct amdgpu_mca_funcs *funcs;
 	struct amdgpu_mca_ras mp0;
 	struct amdgpu_mca_ras mp1;
 	struct amdgpu_mca_ras mpio;
@@ -55,5 +50,7 @@ void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
 void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
 				      uint64_t mc_status_addr,
 				      void *ras_error_status);
-
+int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
new file mode 100644
index 000000000000..0f6b1021fef3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_mmhub_ras_sw_init(struct amdgpu_device *adev)
+{
+	int err;
+	struct amdgpu_mmhub_ras *ras;
+
+	if (!adev->mmhub.ras)
+		return 0;
+
+	ras = adev->mmhub.ras;
+	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+	if (err) {
+		dev_err(adev->dev, "Failed to register mmhub ras block!\n");
+		return err;
+	}
+
+	strcpy(ras->ras_block.ras_comm.name, "mmhub");
+	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB;
+	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+	adev->mmhub.ras_if = &ras->ras_block.ras_comm;
+
+	/* mmhub ras follows amdgpu_ras_block_late_init_default for late init */
+	return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 93430d3823c9..d21bb6dae56e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -48,5 +48,7 @@ struct amdgpu_mmhub {
 	struct amdgpu_mmhub_ras  *ras;
 };
 
+int amdgpu_mmhub_ras_sw_init(struct amdgpu_device *adev);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index 37d779b8e4a6..a3bc00577a7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -22,6 +22,29 @@
 #include "amdgpu.h"
 #include "amdgpu_ras.h"
 
+int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev)
+{
+	int err;
+	struct amdgpu_nbio_ras *ras;
+
+	if (!adev->nbio.ras)
+		return 0;
+
+	ras = adev->nbio.ras;
+	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+	if (err) {
+		dev_err(adev->dev, "Failed to register pcie_bif ras block!\n");
+		return err;
+	}
+
+	strcpy(ras->ras_block.ras_comm.name, "pcie_bif");
+	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__PCIE_BIF;
+	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+	adev->nbio.ras_if = &ras->ras_block.ras_comm;
+
+	return 0;
+}
+
 int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
 	int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index a240336bbc6b..c686ff4bcc39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -106,5 +106,6 @@ struct amdgpu_nbio {
 	struct amdgpu_nbio_ras  *ras;
 };
 
+int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev);
 int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 858d881ab9a1..2bd1a54ee866 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1265,24 +1265,41 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
 }
 
-void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
-			  uint64_t *gtt_mem, uint64_t *cpu_mem)
+void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
+			  struct amdgpu_mem_stats *stats)
 {
 	unsigned int domain;
+	uint64_t size = amdgpu_bo_size(bo);
 
 	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 	switch (domain) {
 	case AMDGPU_GEM_DOMAIN_VRAM:
-		*vram_mem += amdgpu_bo_size(bo);
+		stats->vram += size;
+		if (amdgpu_bo_in_cpu_visible_vram(bo))
+			stats->visible_vram += size;
 		break;
 	case AMDGPU_GEM_DOMAIN_GTT:
-		*gtt_mem += amdgpu_bo_size(bo);
+		stats->gtt += size;
 		break;
 	case AMDGPU_GEM_DOMAIN_CPU:
 	default:
-		*cpu_mem += amdgpu_bo_size(bo);
+		stats->cpu += size;
 		break;
 	}
+
+	if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
+		stats->requested_vram += size;
+		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+			stats->requested_visible_vram += size;
+
+		if (domain != AMDGPU_GEM_DOMAIN_VRAM) {
+			stats->evicted_vram += size;
+			if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+				stats->evicted_visible_vram += size;
+		}
+	} else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
+		stats->requested_gtt += size;
+	}
 }
 
 /**
@@ -1315,7 +1332,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
-	    adev->in_suspend || adev->shutdown)
+	    adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
 		return;
 
 	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 5a85726ce853..35b8106816a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -126,6 +126,27 @@ struct amdgpu_bo_vm {
 	struct amdgpu_vm_bo_base	entries[];
 };
 
+struct amdgpu_mem_stats {
+	/* current VRAM usage, includes visible VRAM */
+	uint64_t vram;
+	/* current visible VRAM usage */
+	uint64_t visible_vram;
+	/* current GTT usage */
+	uint64_t gtt;
+	/* current system memory usage */
+	uint64_t cpu;
+	/* sum of evicted buffers, includes visible VRAM */
+	uint64_t evicted_vram;
+	/* sum of evicted buffers due to CPU access */
+	uint64_t evicted_visible_vram;
+	/* how much userspace asked for, includes vis.VRAM */
+	uint64_t requested_vram;
+	/* how much userspace asked for */
+	uint64_t requested_visible_vram;
+	/* how much userspace asked for */
+	uint64_t requested_gtt;
+};
+
 static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
 {
 	return container_of(tbo, struct amdgpu_bo, tbo);
@@ -325,8 +346,8 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
 int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
 u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
-void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
-			  uint64_t *gtt_mem, uint64_t *cpu_mem);
+void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
+			  struct amdgpu_mem_stats *stats);
 void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
 int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
 			     struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 28fe6d941054..02f948adae72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -191,6 +191,7 @@ static int psp_early_init(void *handle)
 		psp_v12_0_set_psp_funcs(psp);
 		break;
 	case IP_VERSION(13, 0, 2):
+	case IP_VERSION(13, 0, 6):
 		psp_v13_0_set_psp_funcs(psp);
 		break;
 	case IP_VERSION(13, 0, 1):
@@ -602,27 +603,14 @@ psp_cmd_submit_buf(struct psp_context *psp,
 		   struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
 {
 	int ret;
-	int index, idx;
+	int index;
 	int timeout = 20000;
 	bool ras_intr = false;
 	bool skip_unsupport = false;
-	bool dev_entered;
 
 	if (psp->adev->no_hw_access)
 		return 0;
 
-	dev_entered = drm_dev_enter(adev_to_drm(psp->adev), &idx);
-	/*
-	 * We allow sending PSP messages LOAD_ASD and UNLOAD_TA without acquiring
-	 * a lock in drm_dev_enter during driver unload because we must call
-	 * drm_dev_unplug as the beginning of unload driver sequence . It is very
-	 * crucial that userspace can't access device instances anymore.
-	 */
-	if (!dev_entered)
-		WARN_ON(psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_LOAD_ASD &&
-			psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_UNLOAD_TA &&
-			psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_INVOKE_CMD);
-
 	memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
 
 	memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
@@ -686,8 +674,6 @@ psp_cmd_submit_buf(struct psp_context *psp,
 	}
 
 exit:
-	if (dev_entered)
-		drm_dev_exit(idx);
 	return ret;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 63dfcc98152d..11df6ee052b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -2554,21 +2554,24 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
 	/* initialize nbio ras function ahead of any other
 	 * ras functions so hardware fatal error interrupt
 	 * can be enabled as early as possible */
-	switch (adev->asic_type) {
-	case CHIP_VEGA20:
-	case CHIP_ARCTURUS:
-	case CHIP_ALDEBARAN:
-		if (!adev->gmc.xgmi.connected_to_cpu) {
+	switch (adev->ip_versions[NBIO_HWIP][0]) {
+	case IP_VERSION(7, 4, 0):
+	case IP_VERSION(7, 4, 1):
+	case IP_VERSION(7, 4, 4):
+		if (!adev->gmc.xgmi.connected_to_cpu)
 			adev->nbio.ras = &nbio_v7_4_ras;
-			amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block);
-			adev->nbio.ras_if = &adev->nbio.ras->ras_block.ras_comm;
-		}
 		break;
 	default:
 		/* nbio ras is not available */
 		break;
 	}
 
+	/* nbio ras block needs to be enabled ahead of other ras blocks
+	 * to handle fatal error */
+	r = amdgpu_nbio_ras_sw_init(adev);
+	if (r)
+		return r;
+
 	if (adev->nbio.ras &&
 	    adev->nbio.ras->init_ras_controller_interrupt) {
 		r = adev->nbio.ras->init_ras_controller_interrupt(adev);
@@ -3073,9 +3076,6 @@ int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
 	if (!adev || !ras_block_obj)
 		return -EINVAL;
 
-	if (!amdgpu_ras_asic_supported(adev))
-		return 0;
-
 	ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
 	if (!ras_node)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 1b8574bc4463..da68ceaa024c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -208,6 +208,36 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
 	return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
 }
 
+int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev)
+{
+	int err;
+	struct amdgpu_umc_ras *ras;
+
+	if (!adev->umc.ras)
+		return 0;
+
+	ras = adev->umc.ras;
+
+	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+	if (err) {
+		dev_err(adev->dev, "Failed to register umc ras block!\n");
+		return err;
+	}
+
+	strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
+	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
+	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+	adev->umc.ras_if = &ras->ras_block.ras_comm;
+
+	if (!ras->ras_block.ras_late_init)
+		ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
+
+	if (!ras->ras_block.ras_cb)
+		ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
+
+	return 0;
+}
+
 int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
 {
 	int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index f2bf979af588..d7f1229ff11f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -42,7 +42,7 @@
 #define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
 
 #define LOOP_UMC_NODE_INST(node_inst) \
-	for ((node_inst) = 0; (node_inst) < adev->umc.node_inst_num; (node_inst)++)
+	for_each_set_bit((node_inst), &(adev->umc.active_mask), adev->umc.node_inst_num)
 
 #define LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) \
 	LOOP_UMC_NODE_INST((node_inst)) LOOP_UMC_INST_AND_CH((umc_inst), (ch_inst))
@@ -69,7 +69,7 @@ struct amdgpu_umc {
 	/* number of umc instance with memory map register access */
 	uint32_t umc_inst_num;
 
-	/*number of umc node instance with memory map register access*/
+	/* Total number of umc node instance including harvest one */
 	uint32_t node_inst_num;
 
 	/* UMC regiser per channel offset */
@@ -82,8 +82,12 @@ struct amdgpu_umc {
 	const struct amdgpu_umc_funcs *funcs;
 	struct amdgpu_umc_ras *ras;
+
+	/* active mask for umc node instance */
+	unsigned long active_mask;
 };
 
+int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
 int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
 int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset);
 int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 632a6ded5735..6887109abb13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1118,14 +1118,11 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct dma_fence *f = NULL;
+	uint32_t offset, data[4];
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
-	uint32_t data[4];
 	uint64_t addr;
-	long r;
-	int i;
-	unsigned offset_idx = 0;
-	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
+	int i, r;
 
 	r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
 				     AMDGPU_FENCE_OWNER_UNDEFINED,
@@ -1134,16 +1131,15 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	if (r)
 		return r;
 
-	if (adev->asic_type >= CHIP_VEGA10) {
-		offset_idx = 1 + ring->me;
-		offset[1] = adev->reg_offset[UVD_HWIP][0][1];
-		offset[2] = adev->reg_offset[UVD_HWIP][1][1];
-	}
+	if (adev->asic_type >= CHIP_VEGA10)
+		offset = adev->reg_offset[UVD_HWIP][ring->me][1];
+	else
+		offset = UVD_BASE_SI;
 
-	data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
-	data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
-	data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
-	data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
+	data[0] = PACKET0(offset + UVD_GPCOM_VCPU_DATA0, 0);
+	data[1] = PACKET0(offset + UVD_GPCOM_VCPU_DATA1, 0);
+	data[2] = PACKET0(offset + UVD_GPCOM_VCPU_CMD, 0);
+	data[3] = PACKET0(offset + UVD_NO_OP, 0);
 
 	ib = &job->ibs[0];
 	addr = amdgpu_bo_gpu_offset(bo);
@@ -1160,14 +1156,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 
 	if (direct) {
-		r = dma_resv_wait_timeout(bo->tbo.base.resv,
-					  DMA_RESV_USAGE_KERNEL, false,
-					  msecs_to_jiffies(10));
-		if (r == 0)
-			r = -ETIMEDOUT;
-		if (r < 0)
-			goto err_free;
-
 		r = amdgpu_job_submit_direct(job, ring, &f);
 		if (r)
 			goto err_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 25217b05c0ea..e63fcc58e8e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -26,6 +26,7 @@
 
 #include <linux/firmware.h>
 #include <linux/module.h>
+#include <linux/dmi.h>
 #include <linux/pci.h>
 #include <linux/debugfs.h>
 #include <drm/drm_drv.h>
@@ -114,6 +115,24 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 	    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 		adev->vcn.indirect_sram = true;
 
+	/*
+	 * Some Steam Deck's BIOS versions are incompatible with the
+	 * indirect SRAM mode, leading to amdgpu being unable to get
+	 * properly probed (and even potentially crashing the kernel).
+	 * Hence, check for these versions here - notice this is
+	 * restricted to Vangogh (Deck's APU).
+	 */
+	if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 2)) {
+		const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
+
+		if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
+		     !strncmp("F7A0114", bios_ver, 7))) {
+			adev->vcn.indirect_sram = false;
+			dev_info(adev->dev,
+				 "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
+		}
+	}
+
 	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
 	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
@@ -1162,19 +1181,28 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
 	return 0;
 }
 
-void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev)
+int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
 {
+	int err;
+	struct amdgpu_vcn_ras *ras;
+
 	if (!adev->vcn.ras)
-		return;
+		return 0;
+
+	ras = adev->vcn.ras;
+	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+	if (err) {
+		dev_err(adev->dev, "Failed to register vcn ras block!\n");
+		return err;
+	}
 
-	amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);
+	strcpy(ras->ras_block.ras_comm.name, "vcn");
+	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
+	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
+	adev->vcn.ras_if = &ras->ras_block.ras_comm;
 
-	strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
-	adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
-	adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
-	adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;
+	if (!ras->ras_block.ras_late_init)
+		ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
 
-	/* If don't define special ras_late_init function, use default ras_late_init */
-	if (!adev->vcn.ras->ras_block.ras_late_init)
-		adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index d3e2af902907..c730949ece7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -400,6 +400,6 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
 int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
 				  struct amdgpu_irq_src *source,
 				  struct amdgpu_iv_entry *entry);
-void amdgpu_vcn_set_ras_funcs(struct amdgpu_device *adev);
+int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index b9e9480448af..4f7bab52282a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -124,6 +124,8 @@ enum AMDGIM_FEATURE_FLAG {
 	AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
 	/* Indirect Reg Access enabled */
 	AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
+	/* AV1 Support MODE*/
+	AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6),
 };
 
 enum AMDGIM_REG_ACCESS_FLAG {
@@ -322,6 +324,8 @@ static inline bool is_virtual_machine(void)
 	((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
 #define amdgpu_sriov_is_normal(adev) \
 	((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
+#define amdgpu_sriov_is_av1_support(adev) \
AMDGIM_FEATURE_AV1_SUPPORT) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b9441ab457ea..286e326bb4bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -867,6 +867,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, pages_addr[idx - 1] + PAGE_SIZE)) break; } + if (!contiguous) + count--; num_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE; } @@ -918,8 +920,8 @@ error_unlock: return r; } -void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, - uint64_t *gtt_mem, uint64_t *cpu_mem) +void amdgpu_vm_get_memory(struct amdgpu_vm *vm, + struct amdgpu_mem_stats *stats) { struct amdgpu_bo_va *bo_va, *tmp; @@ -927,41 +929,36 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { if (!bo_va->base.bo) continue; - amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, - gtt_mem, cpu_mem); + amdgpu_bo_get_memory(bo_va->base.bo, stats); } list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { if (!bo_va->base.bo) continue; - amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, - gtt_mem, cpu_mem); + amdgpu_bo_get_memory(bo_va->base.bo, stats); } list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { if (!bo_va->base.bo) continue; - amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, - gtt_mem, cpu_mem); + amdgpu_bo_get_memory(bo_va->base.bo, stats); } list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { if (!bo_va->base.bo) continue; - amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, - gtt_mem, cpu_mem); + amdgpu_bo_get_memory(bo_va->base.bo, stats); } list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { if (!bo_va->base.bo) continue; - amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, - gtt_mem, cpu_mem); + amdgpu_bo_get_memory(bo_va->base.bo, stats); } list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { if (!bo_va->base.bo) continue; - amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, - gtt_mem, cpu_mem); + amdgpu_bo_get_memory(bo_va->base.bo, stats); } spin_unlock(&vm->status_lock); } + /** * amdgpu_vm_bo_update - update all BO mappings in the vm page table * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 856a64bc7a89..6f085f0b4ef3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -40,6 +40,7 @@ struct amdgpu_bo_va; struct amdgpu_job; struct amdgpu_bo_list_entry; struct amdgpu_bo_vm; +struct amdgpu_mem_stats; /* * GPUVM handling @@ -457,8 +458,8 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct amdgpu_vm *vm); -void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, - uint64_t *gtt_mem, uint64_t *cpu_mem); +void amdgpu_vm_get_memory(struct amdgpu_vm *vm, + struct amdgpu_mem_stats *stats); int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_vm *vmbo, bool immediate); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 4340d08f7607..3fe24348d199 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -308,7 +308,7 @@ static const 
struct sysfs_ops amdgpu_xgmi_hive_ops = { .show = amdgpu_xgmi_show_attrs, }; -struct kobj_type amdgpu_xgmi_hive_type = { +static const struct kobj_type amdgpu_xgmi_hive_type = { .release = amdgpu_xgmi_hive_release, .sysfs_ops = &amdgpu_xgmi_hive_ops, .default_groups = amdgpu_xgmi_hive_groups, @@ -1048,12 +1048,30 @@ struct amdgpu_ras_block_hw_ops xgmi_ras_hw_ops = { struct amdgpu_xgmi_ras xgmi_ras = { .ras_block = { - .ras_comm = { - .name = "xgmi_wafl", - .block = AMDGPU_RAS_BLOCK__XGMI_WAFL, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - }, .hw_ops = &xgmi_ras_hw_ops, .ras_late_init = amdgpu_xgmi_ras_late_init, }, }; + +int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev) +{ + int err; + struct amdgpu_xgmi_ras *ras; + + if (!adev->gmc.xgmi.ras) + return 0; + + ras = adev->gmc.xgmi.ras; + err = amdgpu_ras_register_ras_block(adev, &ras->ras_block); + if (err) { + dev_err(adev->dev, "Failed to register xgmi_wafl_pcs ras block!\n"); + return err; + } + + strcpy(ras->ras_block.ras_comm.name, "xgmi_wafl_pcs"); + ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__XGMI_WAFL; + ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->gmc.xgmi.ras_if = &ras->ras_block.ras_comm; + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index 30dcc1681b4e..86fbf56938f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -73,5 +73,6 @@ static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev, adev->gmc.xgmi.hive_id && adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id); } +int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 6c97148ca0ed..24d42d24e6a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -93,7 +93,8 @@ union amd_sriov_msg_feature_flags { uint32_t mm_bw_management : 1; uint32_t pp_one_vf_mode : 1; uint32_t reg_indirect_acc : 1; - uint32_t reserved : 26; + uint32_t av1_support : 1; + uint32_t reserved : 25; } flags; uint32_t all; }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 6983acc456b2..516409989235 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -7266,7 +7266,6 @@ static int gfx_v10_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; - uint32_t tmp; amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); @@ -7285,17 +7284,9 @@ static int gfx_v10_0_hw_fini(void *handle) if (amdgpu_sriov_vf(adev)) { gfx_v10_0_cp_gfx_enable(adev, false); - /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */ - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) { - tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid); - tmp &= 0xffffff00; - WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp); - } else { - tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS); - tmp &= 0xffffff00; - WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp); - } - + /* Remove the steps of clearing KIQ position. + * It causes GFX hang when another Win guest is rendering. 
+ */
 return 0;
 }

 gfx_v10_0_cp_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 3bf697a80cf2..ecf8ceb53311 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1287,6 +1287,11 @@ static int gfx_v11_0_sw_init(void *handle)
 break;
 }

+ /* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) &&
+ amdgpu_sriov_is_pp_one_vf(adev))
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;
+
 /* EOP Event */
 r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
 GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
@@ -4655,6 +4660,14 @@ static bool gfx_v11_0_check_soft_reset(void *handle)
 return false;
 }

+static int gfx_v11_0_post_soft_reset(void *handle)
+{
+ /*
+ * A GFX soft reset also impacts MES, so MES must be resumed after a GFX soft reset.
+ */
+ return amdgpu_mes_resume((struct amdgpu_device *)handle);
+}
+
 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
 uint64_t clock;
@@ -6166,6 +6179,7 @@ static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
 .wait_for_idle = gfx_v11_0_wait_for_idle,
 .soft_reset = gfx_v11_0_soft_reset,
 .check_soft_reset = gfx_v11_0_check_soft_reset,
+ .post_soft_reset = gfx_v11_0_post_soft_reset,
 .set_clockgating_state = gfx_v11_0_set_clockgating_state,
 .set_powergating_state = gfx_v11_0_set_powergating_state,
 .get_clockgating_state = gfx_v11_0_get_clockgating_state,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index ab2556ca984e..d99821692ba3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -699,25 +699,8 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
 default:
 break;
 }
- if (adev->umc.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
-
- strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
- adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
- adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->umc.ras->ras_block.ras_late_init)
- adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
-
- /* If not defined special ras_cb function, use default ras_cb */
- if (!adev->umc.ras->ras_block.ras_cb)
- adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
- }
 }
-
 static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
 switch (adev->ip_versions[MMHUB_HWIP][0]) {
@@ -754,7 +737,6 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)

 static int gmc_v10_0_early_init(void *handle)
{
- int r;
 struct amdgpu_device *adev = (struct amdgpu_device *)handle;

 gmc_v10_0_set_mmhub_funcs(adev);
@@ -770,10 +752,6 @@ static int gmc_v10_0_early_init(void *handle)
 adev->gmc.private_aperture_end = adev->gmc.private_aperture_start +
 (4ULL << 30) - 1;

- r = amdgpu_gmc_ras_early_init(adev);
- if (r)
- return r;
-
 return 0;
}
@@ -1024,6 +1002,10 @@ static int gmc_v10_0_sw_init(void *handle)
 amdgpu_vm_manager_init(adev);

+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
 return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 85e0afc3d4f7..fad199ed15f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -567,7 +567,6 @@ static
void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev) case IP_VERSION(8, 10, 0): adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM; adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM; - adev->umc.node_inst_num = adev->gmc.num_umc; adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev); adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET; adev->umc.retire_unit = UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; @@ -582,23 +581,6 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev) default: break; } - - if (adev->umc.ras) { - amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block); - - strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc"); - adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC; - adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; - adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm; - - /* If don't define special ras_late_init function, use default ras_late_init */ - if (!adev->umc.ras->ras_block.ras_late_init) - adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init; - - /* If not define special ras_cb function, use default ras_cb */ - if (!adev->umc.ras->ras_block.ras_cb) - adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb; - } } @@ -847,6 +829,10 @@ static int gmc_v11_0_sw_init(void *handle) amdgpu_vm_manager_init(adev); + r = amdgpu_gmc_ras_sw_init(adev); + if (r) + return r; + return 0; } @@ -876,6 +862,12 @@ static int gmc_v11_0_sw_fini(void *handle) static void gmc_v11_0_init_golden_registers(struct amdgpu_device *adev) { + if (amdgpu_sriov_vf(adev)) { + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; + + WREG32(hub->vm_contexts_disable, 0); + return; + } } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index b06170c00dfc..2a8dc9b52c2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1318,23 +1318,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) default: break; } - - if (adev->umc.ras) { - amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block); - - strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc"); - adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC; - adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; - adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm; - - /* If don't define special ras_late_init function, use default ras_late_init */ - if (!adev->umc.ras->ras_block.ras_late_init) - adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init; - - /* If not defined special ras_cb function, use default ras_cb */ - if (!adev->umc.ras->ras_block.ras_cb) - adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb; - } } static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) @@ -1368,15 +1351,6 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) /* mmhub ras is not available */ break; } - - if (adev->mmhub.ras) { - amdgpu_ras_register_ras_block(adev, &adev->mmhub.ras->ras_block); - - strcpy(adev->mmhub.ras->ras_block.ras_comm.name, "mmhub"); - adev->mmhub.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB; - adev->mmhub.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; - adev->mmhub.ras_if = &adev->mmhub.ras->ras_block.ras_comm; - } } static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) @@ -1387,26 +1361,34 @@ static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) static void 
gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev) { adev->hdp.ras = &hdp_v4_0_ras; - amdgpu_ras_register_ras_block(adev, &adev->hdp.ras->ras_block); - adev->hdp.ras_if = &adev->hdp.ras->ras_block.ras_comm; } -static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev) +static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev) { + struct amdgpu_mca *mca = &adev->mca; + /* is UMC the right IP to check for MCA? Maybe DF? */ switch (adev->ip_versions[UMC_HWIP][0]) { case IP_VERSION(6, 7, 0): - if (!adev->gmc.xgmi.connected_to_cpu) - adev->mca.funcs = &mca_v3_0_funcs; + if (!adev->gmc.xgmi.connected_to_cpu) { + mca->mp0.ras = &mca_v3_0_mp0_ras; + mca->mp1.ras = &mca_v3_0_mp1_ras; + mca->mpio.ras = &mca_v3_0_mpio_ras; + } break; default: break; } } +static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev) +{ + if (!adev->gmc.xgmi.connected_to_cpu) + adev->gmc.xgmi.ras = &xgmi_ras; +} + static int gmc_v9_0_early_init(void *handle) { - int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */ @@ -1427,7 +1409,8 @@ static int gmc_v9_0_early_init(void *handle) gmc_v9_0_set_mmhub_ras_funcs(adev); gmc_v9_0_set_gfxhub_funcs(adev); gmc_v9_0_set_hdp_ras_funcs(adev); - gmc_v9_0_set_mca_funcs(adev); + gmc_v9_0_set_mca_ras_funcs(adev); + gmc_v9_0_set_xgmi_ras_funcs(adev); adev->gmc.shared_aperture_start = 0x2000000000000000ULL; adev->gmc.shared_aperture_end = @@ -1436,10 +1419,6 @@ static int gmc_v9_0_early_init(void *handle) adev->gmc.private_aperture_end = adev->gmc.private_aperture_start + (4ULL << 30) - 1; - r = amdgpu_gmc_ras_early_init(adev); - if (r) - return r; - return 0; } @@ -1644,8 +1623,6 @@ static int gmc_v9_0_sw_init(void *handle) adev->gfxhub.funcs->init(adev); adev->mmhub.funcs->init(adev); - if (adev->mca.funcs) - adev->mca.funcs->init(adev); spin_lock_init(&adev->gmc.invalidate_lock); @@ -1798,6 +1775,10 @@ static int gmc_v9_0_sw_init(void *handle) gmc_v9_0_save_registers(adev); + r = amdgpu_gmc_ras_sw_init(adev); + if (r) + return r; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c index adf89680f53e..71d1a2e3bac9 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c @@ -49,7 +49,8 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev, static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0)) + if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0) || + adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 2)) return; if (!ring || !ring->funcs->emit_wreg) @@ -160,11 +161,6 @@ struct amdgpu_ras_block_hw_ops hdp_v4_0_ras_hw_ops = { struct amdgpu_hdp_ras hdp_v4_0_ras = { .ras_block = { - .ras_comm = { - .name = "hdp", - .block = AMDGPU_RAS_BLOCK__HDP, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - }, .hw_ops = &hdp_v4_0_ras_hw_ops, }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index f2b743a93915..6b1887808782 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -138,6 +138,10 @@ static int jpeg_v2_5_sw_init(void *handle) adev->jpeg.inst[i].external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, i, mmUVD_JPEG_PITCH); } + r = amdgpu_jpeg_ras_sw_init(adev); + if (r) + return r; + return 0; } @@ -806,6 +810,4 @@ static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev) default: break; } - - 
jpeg_set_ras_funcs(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index 3beb731b2ce5..3129094baccc 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -113,6 +113,10 @@ static int jpeg_v4_0_sw_init(void *handle) adev->jpeg.internal.jpeg_pitch = regUVD_JPEG_PITCH_INTERNAL_OFFSET; adev->jpeg.inst->external.jpeg_pitch = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH); + r = amdgpu_jpeg_ras_sw_init(adev); + if (r) + return r; + return 0; } @@ -685,6 +689,4 @@ static void jpeg_v4_0_set_ras_funcs(struct amdgpu_device *adev) default: break; } - - jpeg_set_ras_funcs(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c index d4bd7d1d2649..6dae4a2e2767 100644 --- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c @@ -51,19 +51,13 @@ static int mca_v3_0_ras_block_match(struct amdgpu_ras_block_object *block_obj, return -EINVAL; } -const struct amdgpu_ras_block_hw_ops mca_v3_0_mp0_hw_ops = { +static const struct amdgpu_ras_block_hw_ops mca_v3_0_mp0_hw_ops = { .query_ras_error_count = mca_v3_0_mp0_query_ras_error_count, .query_ras_error_address = NULL, }; struct amdgpu_mca_ras_block mca_v3_0_mp0_ras = { .ras_block = { - .ras_comm = { - .block = AMDGPU_RAS_BLOCK__MCA, - .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP0, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - .name = "mp0", - }, .hw_ops = &mca_v3_0_mp0_hw_ops, .ras_block_match = mca_v3_0_ras_block_match, }, @@ -77,19 +71,13 @@ static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev, ras_error_status); } -const struct amdgpu_ras_block_hw_ops mca_v3_0_mp1_hw_ops = { +static const struct amdgpu_ras_block_hw_ops mca_v3_0_mp1_hw_ops = { .query_ras_error_count = mca_v3_0_mp1_query_ras_error_count, .query_ras_error_address = NULL, }; struct amdgpu_mca_ras_block mca_v3_0_mp1_ras = { .ras_block = { - .ras_comm = { - .block = AMDGPU_RAS_BLOCK__MCA, - .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP1, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - .name = "mp1", - }, .hw_ops = &mca_v3_0_mp1_hw_ops, .ras_block_match = mca_v3_0_ras_block_match, }, @@ -103,40 +91,14 @@ static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev, ras_error_status); } -const struct amdgpu_ras_block_hw_ops mca_v3_0_mpio_hw_ops = { +static const struct amdgpu_ras_block_hw_ops mca_v3_0_mpio_hw_ops = { .query_ras_error_count = mca_v3_0_mpio_query_ras_error_count, .query_ras_error_address = NULL, }; struct amdgpu_mca_ras_block mca_v3_0_mpio_ras = { .ras_block = { - .ras_comm = { - .block = AMDGPU_RAS_BLOCK__MCA, - .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MPIO, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - .name = "mpio", - }, .hw_ops = &mca_v3_0_mpio_hw_ops, .ras_block_match = mca_v3_0_ras_block_match, }, }; - - -static void mca_v3_0_init(struct amdgpu_device *adev) -{ - struct amdgpu_mca *mca = &adev->mca; - - mca->mp0.ras = &mca_v3_0_mp0_ras; - mca->mp1.ras = &mca_v3_0_mp1_ras; - mca->mpio.ras = &mca_v3_0_mpio_ras; - amdgpu_ras_register_ras_block(adev, &mca->mp0.ras->ras_block); - amdgpu_ras_register_ras_block(adev, &mca->mp1.ras->ras_block); - amdgpu_ras_register_ras_block(adev, &mca->mpio.ras->ras_block); - mca->mp0.ras_if = &mca->mp0.ras->ras_block.ras_comm; - mca->mp1.ras_if = &mca->mp1.ras->ras_block.ras_comm; - mca->mpio.ras_if = &mca->mpio.ras->ras_block.ras_comm; -} - -const struct amdgpu_mca_funcs mca_v3_0_funcs = { - .init = mca_v3_0_init, -};
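
A recurring change across this merge is visible here: the old pattern of registering RAS blocks from the early *_set_*_funcs() helpers (as the removed mca_v3_0_init() did) is replaced by per-block *_ras_sw_init() functions called during sw_init, where a registration failure can actually be propagated to the caller. The amdgpu_vcn_ras_sw_init() and amdgpu_xgmi_ras_sw_init() hunks above show the real thing; the following is only a rough composite sketch of the shared shape, where the "foo" block and AMDGPU_RAS_BLOCK__FOO are placeholders for any IP, not a real interface:

int amdgpu_foo_ras_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_foo_ras *ras;
	int err;

	/* nothing to do when this IP version has no RAS support */
	if (!adev->foo.ras)
		return 0;

	ras = adev->foo.ras;
	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register foo ras block!\n");
		return err;
	}

	/* ras_comm is now filled in here rather than in a static initializer */
	strcpy(ras->ras_block.ras_comm.name, "foo");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__FOO;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->foo.ras_if = &ras->ras_block.ras_comm;

	/* fall back to the common late init when no special handler is set */
	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;

	return 0;
}
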
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h index b899b86194c2..d3eaef0d7f2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h @@ -21,6 +21,8 @@ #ifndef __MCA_V3_0_H__ #define __MCA_V3_0_H__ -extern const struct amdgpu_mca_funcs mca_v3_0_funcs; +extern struct amdgpu_mca_ras_block mca_v3_0_mp0_ras; +extern struct amdgpu_mca_ras_block mca_v3_0_mp1_ras; +extern struct amdgpu_mca_ras_block mca_v3_0_mpio_ras; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c index 164948c50ac3..17a792616979 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c @@ -517,6 +517,9 @@ static void mmhub_v3_0_init(struct amdgpu_device *adev) hub->vm_l2_bank_select_reserved_cid2 = SOC15_REG_OFFSET(MMHUB, 0, regMMVM_L2_BANK_SELECT_RESERVED_CID2); + hub->vm_contexts_disable = + SOC15_REG_OFFSET(MMHUB, 0, regMMVM_CONTEXTS_DISABLE); + hub->vmhub_funcs = &mmhub_v3_0_vmhub_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c index 4b0d563c6522..4ef1fa4603c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c @@ -382,11 +382,6 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data); break; - case IP_VERSION(7, 5, 1): - data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2); - data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK; - WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data); - fallthrough; default: def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL)); data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, @@ -399,6 +394,15 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev) break; } + switch (adev->ip_versions[NBIO_HWIP][0]) { + case IP_VERSION(7, 3, 0): + case IP_VERSION(7, 5, 1): + data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2); + data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK; + WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data); + break; + } + if (amdgpu_sriov_vf(adev)) adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index d972025f0d20..47420b403871 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -280,47 +280,6 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, } } -/* - * Indirect registers accessor - */ -static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg) -{ - unsigned long address, data; - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - return amdgpu_device_indirect_rreg(adev, address, data, reg); -} - -static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) -{ - unsigned long address, data; - - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - amdgpu_device_indirect_wreg(adev, address, data, reg, v); -} - -static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg) -{ - unsigned long address, data; - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - return amdgpu_device_indirect_rreg64(adev, address, data, reg); -} - 
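
All four removed nv_pcie_* wrappers only looked up the NBIO index/data offsets and forwarded them to the amdgpu_device_indirect_* helpers; the amdgpu.h hunk earlier in this merge drops those two parameters from the helpers, so nv.c can now point adev->pcie_rreg and friends at them directly. For reference, the index/data access the helper performs is the classic pattern below; this is a sketch of the expected behavior under that assumption, not the helper's exact body:

static u32 indirect_rreg_sketch(struct amdgpu_device *adev, u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset, *pcie_data_offset;
	u32 r;

	/* the helper can fetch these itself, which is why the params went away */
	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	/* index/data pairs must be serialized against other users */
	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);	/* select the target register */
	readl(pcie_index_offset);		/* post the index write */
	r = readl(pcie_data_offset);		/* fetch the value */
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}
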
-static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) -{ - unsigned long address, data; - - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - amdgpu_device_indirect_wreg64(adev, address, data, reg, v); -} - static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg) { unsigned long flags, address, data; @@ -444,9 +403,10 @@ static int nv_read_register(struct amdgpu_device *adev, u32 se_num, *value = 0; for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) { en = &nv_allowed_read_registers[i]; - if (adev->reg_offset[en->hwip][en->inst] && - reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] - + en->reg_offset)) + if (!adev->reg_offset[en->hwip][en->inst]) + continue; + else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + + en->reg_offset)) continue; *value = nv_get_register_value(adev, @@ -560,24 +520,9 @@ static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) return 0; } -static void nv_pcie_gen3_enable(struct amdgpu_device *adev) -{ - if (pci_is_root_bus(adev->pdev->bus)) - return; - - if (amdgpu_pcie_gen2 == 0) - return; - - if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | - CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))) - return; - - /* todo */ -} - static void nv_program_aspm(struct amdgpu_device *adev) { - if (!amdgpu_device_should_use_aspm(adev)) + if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk()) return; if (!(adev->flags & AMD_IS_APU) && @@ -607,11 +552,6 @@ void nv_set_virt_ops(struct amdgpu_device *adev) adev->virt.ops = &xgpu_nv_virt_ops; } -static uint32_t nv_get_rev_id(struct amdgpu_device *adev) -{ - return adev->nbio.funcs->get_rev_id(adev); -} - static bool nv_need_full_reset(struct amdgpu_device *adev) { return true; @@ -737,10 +677,10 @@ static int nv_common_early_init(void *handle) } adev->smc_rreg = NULL; adev->smc_wreg = NULL; - adev->pcie_rreg = &nv_pcie_rreg; - adev->pcie_wreg = &nv_pcie_wreg; - adev->pcie_rreg64 = &nv_pcie_rreg64; - adev->pcie_wreg64 = &nv_pcie_wreg64; + adev->pcie_rreg = &amdgpu_device_indirect_rreg; + adev->pcie_wreg = &amdgpu_device_indirect_wreg; + adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64; + adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64; adev->pciep_rreg = amdgpu_device_pcie_port_rreg; adev->pciep_wreg = amdgpu_device_pcie_port_wreg; @@ -753,7 +693,7 @@ static int nv_common_early_init(void *handle) adev->asic_funcs = &nv_asic_funcs; - adev->rev_id = nv_get_rev_id(adev); + adev->rev_id = amdgpu_device_get_rev_id(adev); adev->external_rev_id = 0xff; /* TODO: split the GC and PG flags based on the relevant IP version for which * they are relevant. 
@@ -1054,8 +994,8 @@ static int nv_common_late_init(void *handle) amdgpu_virt_update_sriov_video_codec(adev, sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array), - sriov_sc_video_codecs_decode_array_vcn1, - ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1)); + sriov_sc_video_codecs_decode_array_vcn0, + ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0)); } } @@ -1087,8 +1027,6 @@ static int nv_common_hw_init(void *handle) if (adev->nbio.funcs->apply_l1_link_width_reconfig_wa) adev->nbio.funcs->apply_l1_link_width_reconfig_wa(adev); - /* enable pcie gen2/3 link */ - nv_pcie_gen3_enable(adev); /* enable aspm */ nv_program_aspm(adev); /* setup nbio registers */ diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index d62fcc77af95..caee76ab7110 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -48,6 +48,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin"); +MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin"); /* For large FW files the time to complete can be very long */ #define USBC_PD_POLLING_LIMIT_S 240 @@ -100,6 +101,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp) return err; break; case IP_VERSION(13, 0, 0): + case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 7): case IP_VERSION(13, 0, 10): err = psp_init_sos_microcode(psp, ucode_prefix); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c new file mode 100644 index 000000000000..1b04700a4d55 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -0,0 +1,1967 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include <linux/delay.h> +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "amdgpu.h" +#include "amdgpu_ucode.h" +#include "amdgpu_trace.h" + +#include "sdma/sdma_4_4_2_offset.h" +#include "sdma/sdma_4_4_2_sh_mask.h" + +#include "soc15_common.h" +#include "soc15.h" +#include "vega10_sdma_pkt_open.h" + +#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h" +#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h" + +#include "amdgpu_ras.h" + +MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin"); + +#define WREG32_SDMA(instance, offset, value) \ + WREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)), value) +#define RREG32_SDMA(instance, offset) \ + RREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset))) + +static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev); +static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev); +static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev); +static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev); + +static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev, + u32 instance, u32 offset) +{ + return (adev->reg_offset[SDMA0_HWIP][instance][0] + offset); +} + +static unsigned sdma_v4_4_2_seq_to_irq_id(int seq_num) +{ + switch (seq_num) { + case 0: + return SOC15_IH_CLIENTID_SDMA0; + case 1: + return SOC15_IH_CLIENTID_SDMA1; + case 2: + return SOC15_IH_CLIENTID_SDMA2; + case 3: + return SOC15_IH_CLIENTID_SDMA3; + default: + return -EINVAL; + } +} + +static int sdma_v4_4_2_irq_id_to_seq(unsigned client_id) +{ + switch (client_id) { + case SOC15_IH_CLIENTID_SDMA0: + return 0; + case SOC15_IH_CLIENTID_SDMA1: + return 1; + case SOC15_IH_CLIENTID_SDMA2: + return 2; + case SOC15_IH_CLIENTID_SDMA3: + return 3; + default: + return -EINVAL; + } +} + +static void sdma_v4_4_2_init_golden_registers(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[SDMA0_HWIP][0]) { + case IP_VERSION(4, 4, 2): + break; + default: + break; + } +} + +/** + * sdma_v4_4_2_init_microcode - load ucode images from disk + * + * @adev: amdgpu_device pointer + * + * Use the firmware interface to load the ucode images into + * the driver (not loaded into hw). + * Returns 0 on success, error on failure. + */ +static int sdma_v4_4_2_init_microcode(struct amdgpu_device *adev) +{ + int ret, i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2)) { + ret = amdgpu_sdma_init_microcode(adev, 0, true); + break; + } else { + ret = amdgpu_sdma_init_microcode(adev, i, false); + if (ret) + return ret; + } + } + + return ret; +} + +/** + * sdma_v4_4_2_ring_get_rptr - get the current read pointer + * + * @ring: amdgpu ring pointer + * + * Get the current rptr from the hardware. + */ +static uint64_t sdma_v4_4_2_ring_get_rptr(struct amdgpu_ring *ring) +{ + u64 *rptr; + + /* XXX check if swapping is necessary on BE */ + rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]); + + DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr); + return ((*rptr) >> 2); +} + +/** + * sdma_v4_4_2_ring_get_wptr - get the current write pointer + * + * @ring: amdgpu ring pointer + * + * Get the current wptr from the hardware. 
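+ * A unit convention runs through all of these helpers: the hardware
+ * registers and the write-back slot hold the pointer as a byte offset,
+ * while ring->wptr counts dwords, so reads shift right by two and
+ * writes shift left by two, as in the doorbell path below:
+ *
+ *   wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+ *   return wptr >> 2;   /* bytes -> dwords */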
+ */ +static uint64_t sdma_v4_4_2_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u64 wptr; + + if (ring->use_doorbell) { + /* XXX check if swapping is necessary on BE */ + wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); + DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); + } else { + wptr = RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI); + wptr = wptr << 32; + wptr |= RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR); + DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", + ring->me, wptr); + } + + return wptr >> 2; +} + +/** + * sdma_v4_4_2_ring_set_wptr - commit the write pointer + * + * @ring: amdgpu ring pointer + * + * Write the wptr back to the hardware. + */ +static void sdma_v4_4_2_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + DRM_DEBUG("Setting write pointer\n"); + if (ring->use_doorbell) { + u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs]; + + DRM_DEBUG("Using doorbell -- " + "wptr_offs == 0x%08x " + "lower_32_bits(ring->wptr) << 2 == 0x%08x " + "upper_32_bits(ring->wptr) << 2 == 0x%08x\n", + ring->wptr_offs, + lower_32_bits(ring->wptr << 2), + upper_32_bits(ring->wptr << 2)); + /* XXX check if swapping is necessary on BE */ + WRITE_ONCE(*wb, (ring->wptr << 2)); + DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", + ring->doorbell_index, ring->wptr << 2); + WDOORBELL64(ring->doorbell_index, ring->wptr << 2); + } else { + DRM_DEBUG("Not using doorbell -- " + "regSDMA%i_GFX_RB_WPTR == 0x%08x " + "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n", + ring->me, + lower_32_bits(ring->wptr << 2), + ring->me, + upper_32_bits(ring->wptr << 2)); + WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR, + lower_32_bits(ring->wptr << 2)); + WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI, + upper_32_bits(ring->wptr << 2)); + } +} + +/** + * sdma_v4_4_2_page_ring_get_wptr - get the current write pointer + * + * @ring: amdgpu ring pointer + * + * Get the current wptr from the hardware. + */ +static uint64_t sdma_v4_4_2_page_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u64 wptr; + + if (ring->use_doorbell) { + /* XXX check if swapping is necessary on BE */ + wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); + } else { + wptr = RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI); + wptr = wptr << 32; + wptr |= RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR); + } + + return wptr >> 2; +} + +/** + * sdma_v4_4_2_page_ring_set_wptr - commit the write pointer + * + * @ring: amdgpu ring pointer + * + * Write the wptr back to the hardware. 
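+ * As with the gfx ring above, the doorbell path publishes the
+ * byte-shifted pointer through a single 64-bit doorbell write and also
+ * refreshes the shadow slot that the engine polls:
+ *
+ *   WRITE_ONCE(*wb, ring->wptr << 2);
+ *   WDOORBELL64(ring->doorbell_index, ring->wptr << 2);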
+ */ +static void sdma_v4_4_2_page_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring->use_doorbell) { + u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs]; + + /* XXX check if swapping is necessary on BE */ + WRITE_ONCE(*wb, (ring->wptr << 2)); + WDOORBELL64(ring->doorbell_index, ring->wptr << 2); + } else { + uint64_t wptr = ring->wptr << 2; + + WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR, + lower_32_bits(wptr)); + WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI, + upper_32_bits(wptr)); + } +} + +static void sdma_v4_4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) +{ + struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); + int i; + + for (i = 0; i < count; i++) + if (sdma && sdma->burst_nop && (i == 0)) + amdgpu_ring_write(ring, ring->funcs->nop | + SDMA_PKT_NOP_HEADER_COUNT(count - 1)); + else + amdgpu_ring_write(ring, ring->funcs->nop); +} + +/** + * sdma_v4_4_2_ring_emit_ib - Schedule an IB on the DMA engine + * + * @ring: amdgpu ring pointer + * @job: job to retrieve vmid from + * @ib: IB object to schedule + * @flags: unused + * + * Schedule an IB in the DMA ring. + */ +static void sdma_v4_4_2_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) +{ + unsigned vmid = AMDGPU_JOB_GET_VMID(job); + + /* IB packet must end on a 8 DW boundary */ + sdma_v4_4_2_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | + SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); + /* base must be 32 byte aligned */ + amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); + amdgpu_ring_write(ring, ib->length_dw); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0); + +} + +static void sdma_v4_4_2_wait_reg_mem(struct amdgpu_ring *ring, + int mem_space, int hdp, + uint32_t addr0, uint32_t addr1, + uint32_t ref, uint32_t mask, + uint32_t inv) +{ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | + SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) | + SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) | + SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ + if (mem_space) { + /* memory */ + amdgpu_ring_write(ring, addr0); + amdgpu_ring_write(ring, addr1); + } else { + /* registers */ + amdgpu_ring_write(ring, addr0 << 2); + amdgpu_ring_write(ring, addr1 << 2); + } + amdgpu_ring_write(ring, ref); /* reference */ + amdgpu_ring_write(ring, mask); /* mask */ + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */ +} + +/** + * sdma_v4_4_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring + * + * @ring: amdgpu ring pointer + * + * Emit an hdp flush packet on the requested DMA ring. 
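+ * The flush is built as a POLL_REGMEM packet with the HDP flush bit
+ * set: the engine kicks the per-ring bit in the NBIO "req" register
+ * and polls the "done" register until that bit reads back, roughly:
+ *
+ *   ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+ *   sdma_v4_4_2_wait_reg_mem(ring, 0, 1, done_offset, req_offset,
+ *                            ref_and_mask, ref_and_mask, 10);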
+ */ +static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + u32 ref_and_mask = 0; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; + + ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; + + sdma_v4_4_2_wait_reg_mem(ring, 0, 1, + adev->nbio.funcs->get_hdp_flush_done_offset(adev), + adev->nbio.funcs->get_hdp_flush_req_offset(adev), + ref_and_mask, ref_and_mask, 10); +} + +/** + * sdma_v4_4_2_ring_emit_fence - emit a fence on the DMA ring + * + * @ring: amdgpu ring pointer + * @addr: address + * @seq: sequence number + * @flags: fence related flags + * + * Add a DMA fence packet to the ring to write + * the fence seq number and DMA trap packet to generate + * an interrupt if needed. + */ +static void sdma_v4_4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned flags) +{ + bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; + /* write the fence */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)); + /* zero in first two bits */ + BUG_ON(addr & 0x3); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, lower_32_bits(seq)); + + /* optionally write high bits as well */ + if (write64bit) { + addr += 4; + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)); + /* zero in first two bits */ + BUG_ON(addr & 0x3); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(seq)); + } + + /* generate an interrupt */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP)); + amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); +} + + +/** + * sdma_v4_4_2_gfx_stop - stop the gfx async dma engines + * + * @adev: amdgpu_device pointer + * + * Stop the gfx async dma ring buffers. + */ +static void sdma_v4_4_2_gfx_stop(struct amdgpu_device *adev) +{ + struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; + u32 rb_cntl, ib_cntl; + int i, unset = 0; + + for (i = 0; i < adev->sdma.num_instances; i++) { + sdma[i] = &adev->sdma.instance[i].ring; + + if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) { + amdgpu_ttm_set_buffer_funcs_status(adev, false); + unset = 1; + } + + rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0); + WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl); + ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0); + WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl); + } +} + +/** + * sdma_v4_4_2_rlc_stop - stop the compute async dma engines + * + * @adev: amdgpu_device pointer + * + * Stop the compute async dma queues. + */ +static void sdma_v4_4_2_rlc_stop(struct amdgpu_device *adev) +{ + /* XXX todo */ +} + +/** + * sdma_v4_4_2_page_stop - stop the page async dma engines + * + * @adev: amdgpu_device pointer + * + * Stop the page async dma ring buffers. 
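+ * Stopping mirrors sdma_v4_4_2_gfx_stop() above: detach the TTM buffer
+ * funcs if this queue backs them, then clear the enable bits in the
+ * ring-buffer and IB control registers:
+ *
+ *   rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL, RB_ENABLE, 0);
+ *   ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_ENABLE, 0);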
+ */ +static void sdma_v4_4_2_page_stop(struct amdgpu_device *adev) +{ + struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; + u32 rb_cntl, ib_cntl; + int i; + bool unset = false; + + for (i = 0; i < adev->sdma.num_instances; i++) { + sdma[i] = &adev->sdma.instance[i].page; + + if ((adev->mman.buffer_funcs_ring == sdma[i]) && + (!unset)) { + amdgpu_ttm_set_buffer_funcs_status(adev, false); + unset = true; + } + + rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL, + RB_ENABLE, 0); + WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl); + ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, + IB_ENABLE, 0); + WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl); + } +} + +/** + * sdma_v4_4_2_ctx_switch_enable - stop the async dma engines context switch + * + * @adev: amdgpu_device pointer + * @enable: enable/disable the DMA MEs context switch. + * + * Halt or unhalt the async dma engines context switch. + */ +static void sdma_v4_4_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable) +{ + u32 f32_cntl, phase_quantum = 0; + int i; + + if (amdgpu_sdma_phase_quantum) { + unsigned value = amdgpu_sdma_phase_quantum; + unsigned unit = 0; + + while (value > (SDMA_PHASE0_QUANTUM__VALUE_MASK >> + SDMA_PHASE0_QUANTUM__VALUE__SHIFT)) { + value = (value + 1) >> 1; + unit++; + } + if (unit > (SDMA_PHASE0_QUANTUM__UNIT_MASK >> + SDMA_PHASE0_QUANTUM__UNIT__SHIFT)) { + value = (SDMA_PHASE0_QUANTUM__VALUE_MASK >> + SDMA_PHASE0_QUANTUM__VALUE__SHIFT); + unit = (SDMA_PHASE0_QUANTUM__UNIT_MASK >> + SDMA_PHASE0_QUANTUM__UNIT__SHIFT); + WARN_ONCE(1, + "clamping sdma_phase_quantum to %uK clock cycles\n", + value << unit); + } + phase_quantum = + value << SDMA_PHASE0_QUANTUM__VALUE__SHIFT | + unit << SDMA_PHASE0_QUANTUM__UNIT__SHIFT; + } + + for (i = 0; i < adev->sdma.num_instances; i++) { + f32_cntl = RREG32_SDMA(i, regSDMA_CNTL); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_CNTL, + AUTO_CTXSW_ENABLE, enable ? 1 : 0); + if (enable && amdgpu_sdma_phase_quantum) { + WREG32_SDMA(i, regSDMA_PHASE0_QUANTUM, phase_quantum); + WREG32_SDMA(i, regSDMA_PHASE1_QUANTUM, phase_quantum); + WREG32_SDMA(i, regSDMA_PHASE2_QUANTUM, phase_quantum); + } + WREG32_SDMA(i, regSDMA_CNTL, f32_cntl); + + /* Extend page fault timeout to avoid interrupt storm */ + WREG32_SDMA(i, regSDMA_UTCL1_TIMEOUT, 0x00800080); + } + +} + +/** + * sdma_v4_4_2_enable - stop the async dma engines + * + * @adev: amdgpu_device pointer + * @enable: enable/disable the DMA MEs. + * + * Halt or unhalt the async dma engines. + */ +static void sdma_v4_4_2_enable(struct amdgpu_device *adev, bool enable) +{ + u32 f32_cntl; + int i; + + if (!enable) { + sdma_v4_4_2_gfx_stop(adev); + sdma_v4_4_2_rlc_stop(adev); + if (adev->sdma.has_page_queue) + sdma_v4_4_2_page_stop(adev); + } + + for (i = 0; i < adev->sdma.num_instances; i++) { + f32_cntl = RREG32_SDMA(i, regSDMA_F32_CNTL); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_F32_CNTL, HALT, enable ? 
0 : 1); + WREG32_SDMA(i, regSDMA_F32_CNTL, f32_cntl); + } +} + +/* + * sdma_v4_4_2_rb_cntl - get parameters for rb_cntl + */ +static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl) +{ + /* Set ring buffer size in dwords */ + uint32_t rb_bufsz = order_base_2(ring->ring_size / 4); + + barrier(); /* work around https://bugs.llvm.org/show_bug.cgi?id=42576 */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SIZE, rb_bufsz); +#ifdef __BIG_ENDIAN + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, + RPTR_WRITEBACK_SWAP_ENABLE, 1); +#endif + return rb_cntl; +} + +/** + * sdma_v4_4_2_gfx_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * @i: instance to resume + * + * Set up the gfx DMA ring buffers and enable them. + * Returns 0 for success, error for failure. + */ +static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i) +{ + struct amdgpu_ring *ring = &adev->sdma.instance[i].ring; + u32 rb_cntl, ib_cntl, wptr_poll_cntl; + u32 wb_offset; + u32 doorbell; + u32 doorbell_offset; + u64 wptr_gpu_addr; + + wb_offset = (ring->rptr_offs * 4); + + rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL); + rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl); + WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0); + WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0); + WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0); + WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0); + + /* set the wb address whether it's enabled or not */ + WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_HI, + upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); + WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_LO, + lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, + RPTR_WRITEBACK_ENABLE, 1); + + WREG32_SDMA(i, regSDMA_GFX_RB_BASE, ring->gpu_addr >> 8); + WREG32_SDMA(i, regSDMA_GFX_RB_BASE_HI, ring->gpu_addr >> 40); + + ring->wptr = 0; + + /* before programing wptr to a less value, need set minor_ptr_update first */ + WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1); + + doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL); + doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET); + + doorbell = REG_SET_FIELD(doorbell, SDMA_GFX_DOORBELL, ENABLE, + ring->use_doorbell); + doorbell_offset = REG_SET_FIELD(doorbell_offset, + SDMA_GFX_DOORBELL_OFFSET, + OFFSET, ring->doorbell_index); + WREG32_SDMA(i, regSDMA_GFX_DOORBELL, doorbell); + WREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET, doorbell_offset); + + sdma_v4_4_2_ring_set_wptr(ring); + + /* set minor_ptr_update to 0 after wptr programed */ + WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 0); + + /* setup the wptr shadow polling */ + wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_LO, + lower_32_bits(wptr_gpu_addr)); + WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_HI, + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL); + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, + SDMA_GFX_RB_WPTR_POLL_CNTL, + F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 
1 : 0); + WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl); + + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 1); + WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl); + + ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 1); +#ifdef __BIG_ENDIAN + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); +#endif + /* enable DMA IBs */ + WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl); + + ring->sched.ready = true; +} + +/** + * sdma_v4_4_2_page_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * @i: instance to resume + * + * Set up the page DMA ring buffers and enable them. + * Returns 0 for success, error for failure. + */ +static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i) +{ + struct amdgpu_ring *ring = &adev->sdma.instance[i].page; + u32 rb_cntl, ib_cntl, wptr_poll_cntl; + u32 wb_offset; + u32 doorbell; + u32 doorbell_offset; + u64 wptr_gpu_addr; + + wb_offset = (ring->rptr_offs * 4); + + rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL); + rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl); + WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0); + WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0); + WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0); + WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0); + + /* set the wb address whether it's enabled or not */ + WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_HI, + upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); + WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_LO, + lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); + + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL, + RPTR_WRITEBACK_ENABLE, 1); + + WREG32_SDMA(i, regSDMA_PAGE_RB_BASE, ring->gpu_addr >> 8); + WREG32_SDMA(i, regSDMA_PAGE_RB_BASE_HI, ring->gpu_addr >> 40); + + ring->wptr = 0; + + /* before programing wptr to a less value, need set minor_ptr_update first */ + WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 1); + + doorbell = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL); + doorbell_offset = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET); + + doorbell = REG_SET_FIELD(doorbell, SDMA_PAGE_DOORBELL, ENABLE, + ring->use_doorbell); + doorbell_offset = REG_SET_FIELD(doorbell_offset, + SDMA_PAGE_DOORBELL_OFFSET, + OFFSET, ring->doorbell_index); + WREG32_SDMA(i, regSDMA_PAGE_DOORBELL, doorbell); + WREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET, doorbell_offset); + + /* paging queue doorbell range is setup at sdma_v4_4_2_gfx_resume */ + sdma_v4_4_2_page_ring_set_wptr(ring); + + /* set minor_ptr_update to 0 after wptr programed */ + WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 0); + + /* setup the wptr shadow polling */ + wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_LO, + lower_32_bits(wptr_gpu_addr)); + WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_HI, + upper_32_bits(wptr_gpu_addr)); + wptr_poll_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL); + wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, + SDMA_PAGE_RB_WPTR_POLL_CNTL, + F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 
1 : 0); + WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl); + + /* enable DMA RB */ + rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL, RB_ENABLE, 1); + WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl); + + ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL); + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_ENABLE, 1); +#ifdef __BIG_ENDIAN + ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1); +#endif + /* enable DMA IBs */ + WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl); + + ring->sched.ready = true; +} + +static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev) +{ + +} + +/** + * sdma_v4_4_2_rlc_resume - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the compute DMA queues and enable them. + * Returns 0 for success, error for failure. + */ +static int sdma_v4_4_2_rlc_resume(struct amdgpu_device *adev) +{ + sdma_v4_4_2_init_pg(adev); + + return 0; +} + +/** + * sdma_v4_4_2_load_microcode - load the sDMA ME ucode + * + * @adev: amdgpu_device pointer + * + * Loads the sDMA0/1 ucode. + * Returns 0 for success, -EINVAL if the ucode is not available. + */ +static int sdma_v4_4_2_load_microcode(struct amdgpu_device *adev) +{ + const struct sdma_firmware_header_v1_0 *hdr; + const __le32 *fw_data; + u32 fw_size; + int i, j; + + /* halt the MEs */ + sdma_v4_4_2_enable(adev, false); + + for (i = 0; i < adev->sdma.num_instances; i++) { + if (!adev->sdma.instance[i].fw) + return -EINVAL; + + hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; + amdgpu_ucode_print_sdma_hdr(&hdr->header); + fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; + + fw_data = (const __le32 *) + (adev->sdma.instance[i].fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + + WREG32_SDMA(i, regSDMA_UCODE_ADDR, 0); + + for (j = 0; j < fw_size; j++) + WREG32_SDMA(i, regSDMA_UCODE_DATA, + le32_to_cpup(fw_data++)); + + WREG32_SDMA(i, regSDMA_UCODE_ADDR, + adev->sdma.instance[i].fw_version); + } + + return 0; +} + +/** + * sdma_v4_4_2_start - setup and start the async dma engines + * + * @adev: amdgpu_device pointer + * + * Set up the DMA engines and enable them. + * Returns 0 for success, error for failure. 
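+ * The ordering below matters: on bare metal the engines are halted
+ * first, firmware is loaded unless the PSP owns firmware loading, the
+ * MEs are unhalted and context switching is enabled, and only then is
+ * each instance's gfx (and optional page) ring resumed and ring-tested.
+ * Under SR-IOV the host loads the firmware, so the VF only toggles the
+ * halt and context-switch state around ring setup.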
+ */
+static int sdma_v4_4_2_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i, r = 0;
+
+ if (amdgpu_sriov_vf(adev)) {
+ sdma_v4_4_2_ctx_switch_enable(adev, false);
+ sdma_v4_4_2_enable(adev, false);
+ } else {
+ /* bypass sdma microcode loading on Gopher */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
+ !(adev->pdev->device == 0x49) && !(adev->pdev->device == 0x50)) {
+ r = sdma_v4_4_2_load_microcode(adev);
+ if (r)
+ return r;
+ }
+
+ /* unhalt the MEs */
+ sdma_v4_4_2_enable(adev, true);
+ /* enable sdma ring preemption */
+ sdma_v4_4_2_ctx_switch_enable(adev, true);
+ }
+
+ /* start the gfx rings and rlc compute queues */
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ uint32_t temp;
+
+ WREG32_SDMA(i, regSDMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+ sdma_v4_4_2_gfx_resume(adev, i);
+ if (adev->sdma.has_page_queue)
+ sdma_v4_4_2_page_resume(adev, i);
+
+ /* set utc l1 enable flag always to 1 */
+ temp = RREG32_SDMA(i, regSDMA_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
+ WREG32_SDMA(i, regSDMA_CNTL, temp);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ ring = &adev->sdma.instance[i].ring;
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
+ ring->use_doorbell, ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range);
+
+ /* unhalt engine */
+ temp = RREG32_SDMA(i, regSDMA_F32_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA_F32_CNTL, HALT, 0);
+ WREG32_SDMA(i, regSDMA_F32_CNTL, temp);
+ }
+ }
+
+ if (amdgpu_sriov_vf(adev)) {
+ sdma_v4_4_2_ctx_switch_enable(adev, true);
+ sdma_v4_4_2_enable(adev, true);
+ } else {
+ r = sdma_v4_4_2_rlc_resume(adev);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+
+ if (adev->sdma.has_page_queue) {
+ struct amdgpu_ring *page = &adev->sdma.instance[i].page;
+
+ r = amdgpu_ring_test_helper(page);
+ if (r)
+ return r;
+
+ if (adev->mman.buffer_funcs_ring == page)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ }
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ }
+
+ return r;
+}
+
+/**
+ * sdma_v4_4_2_ring_test_ring - simple async dma engine test
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory.
+ * Returns 0 for success, error for failure.
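+ * The test below seeds a write-back slot with 0xCAFEDEAD, submits a
+ * 5-dword WRITE_LINEAR packet that stores 0xDEADBEEF to that slot,
+ * then polls the slot for up to adev->usec_timeout microseconds and
+ * reports -ETIMEDOUT if 0xDEADBEEF never shows up.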
+ */ +static int sdma_v4_4_2_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + unsigned i; + unsigned index; + int r; + u32 tmp; + u64 gpu_addr; + + r = amdgpu_device_wb_get(adev, &index); + if (r) + return r; + + gpu_addr = adev->wb.gpu_addr + (index * 4); + tmp = 0xCAFEDEAD; + adev->wb.wb[index] = cpu_to_le32(tmp); + + r = amdgpu_ring_alloc(ring, 5); + if (r) + goto error_free_wb; + + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); + amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); + amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); + amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0)); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_commit(ring); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + +error_free_wb: + amdgpu_device_wb_free(adev, index); + return r; +} + +/** + * sdma_v4_4_2_ring_test_ib - test an IB on the DMA engine + * + * @ring: amdgpu_ring structure holding ring information + * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT + * + * Test a simple IB in the DMA ring. + * Returns 0 on success, error on failure. + */ +static int sdma_v4_4_2_ring_test_ib(struct amdgpu_ring *ring, long timeout) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_ib ib; + struct dma_fence *f = NULL; + unsigned index; + long r; + u32 tmp = 0; + u64 gpu_addr; + + r = amdgpu_device_wb_get(adev, &index); + if (r) + return r; + + gpu_addr = adev->wb.gpu_addr + (index * 4); + tmp = 0xCAFEDEAD; + adev->wb.wb[index] = cpu_to_le32(tmp); + memset(&ib, 0, sizeof(ib)); + r = amdgpu_ib_get(adev, NULL, 256, + AMDGPU_IB_POOL_DIRECT, &ib); + if (r) + goto err0; + + ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | + SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); + ib.ptr[1] = lower_32_bits(gpu_addr); + ib.ptr[2] = upper_32_bits(gpu_addr); + ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0); + ib.ptr[4] = 0xDEADBEEF; + ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); + ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); + ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); + ib.length_dw = 8; + + r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); + if (r) + goto err1; + + r = dma_fence_wait_timeout(f, false, timeout); + if (r == 0) { + r = -ETIMEDOUT; + goto err1; + } else if (r < 0) { + goto err1; + } + tmp = le32_to_cpu(adev->wb.wb[index]); + if (tmp == 0xDEADBEEF) + r = 0; + else + r = -EINVAL; + +err1: + amdgpu_ib_free(adev, &ib, NULL); + dma_fence_put(f); +err0: + amdgpu_device_wb_free(adev, index); + return r; +} + + +/** + * sdma_v4_4_2_vm_copy_pte - update PTEs by copying them from the GART + * + * @ib: indirect buffer to fill with commands + * @pe: addr of the page entry + * @src: src addr to copy from + * @count: number of page entries to update + * + * Update PTEs by copying them from the GART using sDMA. 
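+ * Each page-table entry is 8 bytes, so the COPY_LINEAR packet below is
+ * sized as count * 8 and given the GART source and page-table
+ * destination addresses:
+ *
+ *   ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+ *           SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ *   ib->ptr[ib->length_dw++] = bytes - 1;  /* length is N-1 encoded */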
+/**
+ * sdma_v4_4_2_vm_copy_pte - update PTEs by copying them from the GART
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA.
+ */
+static void sdma_v4_4_2_vm_copy_pte(struct amdgpu_ib *ib,
+				    uint64_t pe, uint64_t src,
+				    unsigned count)
+{
+	unsigned bytes = count * 8;
+
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+	ib->ptr[ib->length_dw++] = bytes - 1;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+}
+
+/**
+ * sdma_v4_4_2_vm_write_pte - update PTEs by writing them manually
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @value: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ *
+ * Update PTEs by writing them manually using sDMA.
+ */
+static void sdma_v4_4_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
+				     uint64_t value, unsigned count,
+				     uint32_t incr)
+{
+	unsigned ndw = count * 2;
+
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+	ib->ptr[ib->length_dw++] = ndw - 1;
+	for (; ndw > 0; ndw -= 2) {
+		ib->ptr[ib->length_dw++] = lower_32_bits(value);
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		value += incr;
+	}
+}
+
+/**
+ * sdma_v4_4_2_vm_set_pte_pde - update the page tables using sDMA
+ *
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA.
+ */
+static void sdma_v4_4_2_vm_set_pte_pde(struct amdgpu_ib *ib,
+				       uint64_t pe,
+				       uint64_t addr, unsigned count,
+				       uint32_t incr, uint64_t flags)
+{
+	/* for physically contiguous pages (vram) */
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
+	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
+	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
+	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
+	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
+	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+	ib->ptr[ib->length_dw++] = incr; /* increment size */
+	ib->ptr[ib->length_dw++] = 0;
+	ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
+}
+
+/**
+ * sdma_v4_4_2_ring_pad_ib - pad the IB to the required number of dw
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to fill with padding
+ */
+static void sdma_v4_4_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+{
+	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
+	u32 pad_count;
+	int i;
+
+	pad_count = (-ib->length_dw) & 7;
+	for (i = 0; i < pad_count; i++)
+		if (sdma && sdma->burst_nop && (i == 0))
+			ib->ptr[ib->length_dw++] =
+				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
+				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
+		else
+			ib->ptr[ib->length_dw++] =
+				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
+}
+
+
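sdma_v4_4_2_ring_pad_ib() above rounds the IB up to a multiple of 8 dwords using (-length) & 7, collapsing the whole pad into one counted NOP when the instance supports burst NOPs. The alignment arithmetic checked in isolation (a sketch, not driver code):

#include <assert.h>
#include <stdio.h>

/* Mirrors the (-ib->length_dw) & 7 arithmetic in the pad helper above. */
static unsigned pad_count(unsigned length_dw)
{
	return (-length_dw) & 7;
}

int main(void)
{
	unsigned len;

	for (len = 0; len < 16; len++) {
		unsigned pad = pad_count(len);

		/* the padded length is always 8-dword aligned */
		assert(((len + pad) & 7) == 0);
		printf("len %2u -> pad %u\n", len, pad);
	}
	return 0;
}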
+/**
+ * sdma_v4_4_2_ring_emit_pipeline_sync - sync the pipeline
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Make sure all previous operations are completed.
+ */
+static void sdma_v4_4_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+	uint32_t seq = ring->fence_drv.sync_seq;
+	uint64_t addr = ring->fence_drv.gpu_addr;
+
+	/* wait for idle */
+	sdma_v4_4_2_wait_reg_mem(ring, 1, 0,
+				 addr & 0xfffffffc,
+				 upper_32_bits(addr) & 0xffffffff,
+				 seq, 0xffffffff, 4);
+}
+
+
+/**
+ * sdma_v4_4_2_ring_emit_vm_flush - vm flush using sDMA
+ *
+ * @ring: amdgpu_ring pointer
+ * @vmid: vmid number to use
+ * @pd_addr: address
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA.
+ */
+static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
+					   unsigned vmid, uint64_t pd_addr)
+{
+	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+}
+
+static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring,
+				       uint32_t reg, uint32_t val)
+{
+	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+	amdgpu_ring_write(ring, reg);
+	amdgpu_ring_write(ring, val);
+}
+
+static void sdma_v4_4_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+					   uint32_t val, uint32_t mask)
+{
+	sdma_v4_4_2_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
+}
+
+static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
+{
+	switch (adev->ip_versions[SDMA0_HWIP][0]) {
+	case IP_VERSION(4, 4, 2):
+		return false;
+	default:
+		return false;
+	}
+}
+
+static int sdma_v4_4_2_early_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = sdma_v4_4_2_init_microcode(adev);
+	if (r) {
+		DRM_ERROR("Failed to load sdma firmware!\n");
+		return r;
+	}
+
+	/* TODO: Page queue breaks driver reload under SRIOV */
+	if (sdma_v4_4_2_fw_support_paging_queue(adev))
+		adev->sdma.has_page_queue = true;
+
+	sdma_v4_4_2_set_ring_funcs(adev);
+	sdma_v4_4_2_set_buffer_funcs(adev);
+	sdma_v4_4_2_set_vm_pte_funcs(adev);
+	sdma_v4_4_2_set_irq_funcs(adev);
+
+	return 0;
+}
+
+#if 0
+static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
+		void *err_data,
+		struct amdgpu_iv_entry *entry);
+#endif
+
+static int sdma_v4_4_2_late_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+#if 0
+	struct ras_ih_if ih_info = {
+		.cb = sdma_v4_4_2_process_ras_data_cb,
+	};
+#endif
+	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
+		if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
+		    adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
+			adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
+	}
+
+	return 0;
+}
+
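sdma_v4_4_2_sw_init() below computes doorbell offsets: the base index is shifted left by one because 64-bit doorbells occupy two dwords, and the paging queue reuses the gfx queue's slot plus a 0x400 offset onto the second doorbell page. The arithmetic, with a hypothetical base table:

#include <stdio.h>

#define PAGE_QUEUE_OFFSET 0x400	/* second doorbell page, per the driver comment */

int main(void)
{
	int sdma_engine[4] = { 8, 9, 10, 11 };	/* hypothetical base indices */
	int i;

	for (i = 0; i < 4; i++) {
		int gfx  = sdma_engine[i] << 1;	/* 64-bit doorbells: 2 dwords each */
		int page = gfx + PAGE_QUEUE_OFFSET;

		printf("sdma%d: gfx doorbell %#x, page doorbell %#x\n",
		       i, gfx, page);
	}
	return 0;
}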
+static int sdma_v4_4_2_sw_init(void *handle)
+{
+	struct amdgpu_ring *ring;
+	int r, i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	/* SDMA trap event */
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+				      SDMA0_4_0__SRCID__SDMA_TRAP,
+				      &adev->sdma.trap_irq);
+		if (r)
+			return r;
+	}
+
+	/* SDMA SRAM ECC event */
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+				      SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
+				      &adev->sdma.ecc_irq);
+		if (r)
+			return r;
+	}
+
+	/* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event */
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+				      SDMA0_4_0__SRCID__SDMA_VM_HOLE,
+				      &adev->sdma.vm_hole_irq);
+		if (r)
+			return r;
+
+		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+				      SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID,
+				      &adev->sdma.doorbell_invalid_irq);
+		if (r)
+			return r;
+
+		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+				      SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT,
+				      &adev->sdma.pool_timeout_irq);
+		if (r)
+			return r;
+
+		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
+				      SDMA0_4_0__SRCID__SDMA_SRBMWRITE,
+				      &adev->sdma.srbm_write_irq);
+		if (r)
+			return r;
+	}
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
+		ring->ring_obj = NULL;
+		ring->use_doorbell = true;
+
+		DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
+			  ring->use_doorbell ? "true" : "false");
+
+		/* doorbell size is 2 dwords, get DWORD offset */
+		ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
+
+		sprintf(ring->name, "sdma%d", i);
+		r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+				     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+				     AMDGPU_RING_PRIO_DEFAULT, NULL);
+		if (r)
+			return r;
+
+		if (adev->sdma.has_page_queue) {
+			ring = &adev->sdma.instance[i].page;
+			ring->ring_obj = NULL;
+			ring->use_doorbell = true;
+
+			/* paging queue use same doorbell index/routing as gfx queue
+			 * with 0x400 (4096 dwords) offset on second doorbell page
+			 */
+			ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
+			ring->doorbell_index += 0x400;
+
+			sprintf(ring->name, "page%d", i);
+			r = amdgpu_ring_init(adev, ring, 1024,
+					     &adev->sdma.trap_irq,
+					     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+					     AMDGPU_RING_PRIO_DEFAULT, NULL);
+			if (r)
+				return r;
+		}
+	}
+
+	return r;
+}
+
+static int sdma_v4_4_2_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+		if (adev->sdma.has_page_queue)
+			amdgpu_ring_fini(&adev->sdma.instance[i].page);
+	}
+
+	if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2))
+		amdgpu_sdma_destroy_inst_ctx(adev, true);
+	else
+		amdgpu_sdma_destroy_inst_ctx(adev, false);
+
+	return 0;
+}
+
+static int sdma_v4_4_2_hw_init(void *handle)
+{
+	int r;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->flags & AMD_IS_APU)
+		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
+
+	if (!amdgpu_sriov_vf(adev))
+		sdma_v4_4_2_init_golden_registers(adev);
+
+	r = sdma_v4_4_2_start(adev);
+
+	return r;
+}
+
+static int sdma_v4_4_2_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i;
+
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+			       AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+	}
+
+	sdma_v4_4_2_ctx_switch_enable(adev, false);
+	sdma_v4_4_2_enable(adev, false);
+
+	return 0;
+}
+
+static int sdma_v4_4_2_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return sdma_v4_4_2_hw_fini(adev);
+}
+
+static int sdma_v4_4_2_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	return sdma_v4_4_2_hw_init(adev);
+}
+
+static bool sdma_v4_4_2_is_idle(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	u32 i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		u32 tmp = RREG32_SDMA(i, regSDMA_STATUS_REG);
+
+		if (!(tmp & SDMA_STATUS_REG__IDLE_MASK))
+			return false;
+	}
+
+	return true;
+}
+
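sdma_v4_4_2_wait_for_idle() below polls every instance once per pass and succeeds only when a full pass finds all of them idle; the inner loop's exit index doubles as the all-idle check. The same control flow as a self-contained sketch with the register read stubbed out:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define IDLE_MASK 0x1

/* stand-in for RREG32_SDMA(i, regSDMA_STATUS_REG); hypothetical */
static unsigned int read_status(int inst)
{
	(void)inst;
	return IDLE_MASK;
}

static int wait_for_idle(int num_inst, int usec_timeout)
{
	int i, j;

	for (i = 0; i < usec_timeout; i++) {
		for (j = 0; j < num_inst; j++)
			if (!(read_status(j) & IDLE_MASK))
				break;
		if (j == num_inst)
			return 0;	/* one full pass found every engine idle */
		usleep(1);
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("%d\n", wait_for_idle(4, 10));
	return 0;
}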
+static int sdma_v4_4_2_wait_for_idle(void *handle)
+{
+	unsigned i, j;
+	u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		for (j = 0; j < adev->sdma.num_instances; j++) {
+			sdma[j] = RREG32_SDMA(j, regSDMA_STATUS_REG);
+			if (!(sdma[j] & SDMA_STATUS_REG__IDLE_MASK))
+				break;
+		}
+		if (j == adev->sdma.num_instances)
+			return 0;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
+}
+
+static int sdma_v4_4_2_soft_reset(void *handle)
+{
+	/* todo */
+
+	return 0;
+}
+
+static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
+					  struct amdgpu_irq_src *source,
+					  unsigned type,
+					  enum amdgpu_interrupt_state state)
+{
+	u32 sdma_cntl;
+
+	sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
+	sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, TRAP_ENABLE,
+				  state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+	WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
+
+	return 0;
+}
+
+static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					struct amdgpu_iv_entry *entry)
+{
+	uint32_t instance;
+
+	DRM_DEBUG("IH: SDMA trap\n");
+	instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+	switch (entry->ring_id) {
+	case 0:
+		amdgpu_fence_process(&adev->sdma.instance[instance].ring);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+#if 0
+static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
+		void *err_data,
+		struct amdgpu_iv_entry *entry)
+{
+	int instance;
+
+	/* When "Full RAS" is enabled, the per-IP interrupt sources should
+	 * be disabled and the driver should only look for the aggregated
+	 * interrupt via sync flood
+	 */
+	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+		goto out;
+
+	instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+	if (instance < 0)
+		goto out;
+
+	amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);
+
+out:
+	return AMDGPU_RAS_SUCCESS;
+}
+#endif
+
+static int sdma_v4_4_2_process_illegal_inst_irq(struct amdgpu_device *adev,
+						struct amdgpu_irq_src *source,
+						struct amdgpu_iv_entry *entry)
+{
+	int instance;
+
+	DRM_ERROR("Illegal instruction in SDMA command stream\n");
+
+	instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+	if (instance < 0)
+		return 0;
+
+	switch (entry->ring_id) {
+	case 0:
+		drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
+		break;
+	}
+	return 0;
+}
+
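The IRQ state setters here flip a single register field with the driver's REG_SET_FIELD() read-modify-write helper. What that expands to, sketched with invented field offsets (the real shift and mask come from the generated register headers):

#include <stdint.h>
#include <stdio.h>

/* illustrative field parameters; not the real SDMA_CNTL.TRAP_ENABLE values */
#define TRAP_ENABLE_SHIFT 0
#define TRAP_ENABLE_MASK  (1u << TRAP_ENABLE_SHIFT)

/* clear the field's bits, then OR in the shifted new value */
static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t cntl = 0xffff0000;

	cntl = set_field(cntl, TRAP_ENABLE_MASK, TRAP_ENABLE_SHIFT, 1);
	printf("0x%08x\n", (unsigned int)cntl);
	return 0;
}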
+static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
+					 struct amdgpu_irq_src *source,
+					 unsigned type,
+					 enum amdgpu_interrupt_state state)
+{
+	u32 sdma_edc_config;
+
+	sdma_edc_config = RREG32_SDMA(type, regCC_SDMA_EDC_CONFIG);
+	/*
+	 * FIXME: This was inherited from Aldebaran, but this field is not
+	 * defined in the regspec of either Aldebaran or SDMA 4.4.2
+	 */
+	sdma_edc_config |= (state == AMDGPU_IRQ_STATE_ENABLE) ? (1 << 2) : 0;
+	WREG32_SDMA(type, regCC_SDMA_EDC_CONFIG, sdma_edc_config);
+
+	return 0;
+}
+
+static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
+				      struct amdgpu_iv_entry *entry)
+{
+	int instance;
+	struct amdgpu_task_info task_info;
+	u64 addr;
+
+	instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
+	if (instance < 0 || instance >= adev->sdma.num_instances) {
+		dev_err(adev->dev, "sdma instance invalid %d\n", instance);
+		return -EINVAL;
+	}
+
+	addr = (u64)entry->src_data[0] << 12;
+	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+
+	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
+	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
+	dev_dbg_ratelimited(adev->dev,
+			    "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u "
+			    "pasid:%u, for process %s pid %d thread %s pid %d\n",
+			    instance, addr, entry->src_id, entry->ring_id, entry->vmid,
+			    entry->pasid, task_info.process_name, task_info.tgid,
+			    task_info.task_name, task_info.pid);
+	return 0;
+}
+
+static int sdma_v4_4_2_process_vm_hole_irq(struct amdgpu_device *adev,
+					   struct amdgpu_irq_src *source,
+					   struct amdgpu_iv_entry *entry)
+{
+	dev_dbg_ratelimited(adev->dev, "MC or SEM address in VM hole\n");
+	sdma_v4_4_2_print_iv_entry(adev, entry);
+	return 0;
+}
+
+static int sdma_v4_4_2_process_doorbell_invalid_irq(struct amdgpu_device *adev,
+						    struct amdgpu_irq_src *source,
+						    struct amdgpu_iv_entry *entry)
+{
+	dev_dbg_ratelimited(adev->dev,
+			    "SDMA received a doorbell from BIF with byte_enable != 0xff\n");
+	sdma_v4_4_2_print_iv_entry(adev, entry);
+	return 0;
+}
+
+static int sdma_v4_4_2_process_pool_timeout_irq(struct amdgpu_device *adev,
+						struct amdgpu_irq_src *source,
+						struct amdgpu_iv_entry *entry)
+{
+	dev_dbg_ratelimited(adev->dev,
+			    "Polling register/memory timed out executing POLL_REG/MEM with a finite timer\n");
+	sdma_v4_4_2_print_iv_entry(adev, entry);
+	return 0;
+}
+
+static int sdma_v4_4_2_process_srbm_write_irq(struct amdgpu_device *adev,
+					      struct amdgpu_irq_src *source,
+					      struct amdgpu_iv_entry *entry)
+{
+	dev_dbg_ratelimited(adev->dev,
+			    "SDMA got a register write SRBM_WRITE command in a non-privileged command buffer\n");
+	sdma_v4_4_2_print_iv_entry(adev, entry);
+	return 0;
+}
+
+static void sdma_v4_4_2_update_medium_grain_clock_gating(
+		struct amdgpu_device *adev,
+		bool enable)
+{
+	uint32_t data, def;
+	int i;
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
+			data &= ~(SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK |
+				  SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK |
+				  SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
+				  SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
+				  SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
+				  SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
+				  SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK |
+				  SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK);
+			if (def != data)
+				WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
+		}
+	} else {
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
+			data |= (SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK |
+				 SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK |
+				 SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
+				 SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
+				 SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
+				 SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
+				 SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK |
+				 SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK);
+			if (def != data)
+				WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
+		}
+	}
+}
+
+
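The medium-grain clock gating helper above and the light sleep helper below share one discipline: read the control register, set or clear the override bits, and write back only if the value actually changed, so an already-programmed register costs no write. Sketched standalone with an illustrative mask:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for the eight SOFT_OVERRIDEn mask bits */
#define SOFT_OVERRIDE_ALL 0xffu

/* clearing the overrides lets hardware clock gating act;
 * setting them forces the clocks on */
static uint32_t update_cgcg(uint32_t reg, int enable, int *wrote)
{
	uint32_t data = enable ? (reg & ~SOFT_OVERRIDE_ALL)
			       : (reg | SOFT_OVERRIDE_ALL);

	*wrote = (data != reg);	/* skip the register write when unchanged */
	return data;
}

int main(void)
{
	int wrote;
	uint32_t reg = update_cgcg(0xff, 1, &wrote);

	printf("0x%02x wrote=%d\n", (unsigned int)reg, wrote);
	return 0;
}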
+static void sdma_v4_4_2_update_medium_grain_light_sleep(
+		struct amdgpu_device *adev,
+		bool enable)
+{
+	uint32_t data, def;
+	int i;
+
+	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			/* 1-not override: enable sdma mem light sleep */
+			def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
+			data |= SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+			if (def != data)
+				WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
+		}
+	} else {
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			/* 0-override: disable sdma mem light sleep */
+			def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
+			data &= ~SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+			if (def != data)
+				WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
+		}
+	}
+}
+
+static int sdma_v4_4_2_set_clockgating_state(void *handle,
+					     enum amd_clockgating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (amdgpu_sriov_vf(adev))
+		return 0;
+
+	sdma_v4_4_2_update_medium_grain_clock_gating(adev,
+			state == AMD_CG_STATE_GATE);
+	sdma_v4_4_2_update_medium_grain_light_sleep(adev,
+			state == AMD_CG_STATE_GATE);
+	return 0;
+}
+
+static int sdma_v4_4_2_set_powergating_state(void *handle,
+					     enum amd_powergating_state state)
+{
+	return 0;
+}
+
+static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int data;
+
+	if (amdgpu_sriov_vf(adev))
+		*flags = 0;
+
+	/* AMD_CG_SUPPORT_SDMA_MGCG */
+	data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, regSDMA_CLK_CTRL));
+	if (!(data & SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK))
+		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;
+
+	/* AMD_CG_SUPPORT_SDMA_LS */
+	data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, regSDMA_POWER_CNTL));
+	if (data & SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
+		*flags |= AMD_CG_SUPPORT_SDMA_LS;
+}
+
+const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
+	.name = "sdma_v4_4_2",
+	.early_init = sdma_v4_4_2_early_init,
+	.late_init = sdma_v4_4_2_late_init,
+	.sw_init = sdma_v4_4_2_sw_init,
+	.sw_fini = sdma_v4_4_2_sw_fini,
+	.hw_init = sdma_v4_4_2_hw_init,
+	.hw_fini = sdma_v4_4_2_hw_fini,
+	.suspend = sdma_v4_4_2_suspend,
+	.resume = sdma_v4_4_2_resume,
+	.is_idle = sdma_v4_4_2_is_idle,
+	.wait_for_idle = sdma_v4_4_2_wait_for_idle,
+	.soft_reset = sdma_v4_4_2_soft_reset,
+	.set_clockgating_state = sdma_v4_4_2_set_clockgating_state,
+	.set_powergating_state = sdma_v4_4_2_set_powergating_state,
+	.get_clockgating_state = sdma_v4_4_2_get_clockgating_state,
+};
+
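sdma_v4_4_2_ip_funcs above is a plain ops table: the IP-block core drives every lifecycle transition through function pointers, and this block routes suspend to hw_fini and resume to hw_init rather than keeping separate paths. A toy version of the pattern (names invented for the demo):

#include <stdio.h>

struct ip_funcs {
	const char *name;
	int (*hw_init)(void *handle);
	int (*hw_fini)(void *handle);
};

static int demo_hw_init(void *handle) { (void)handle; return 0; }
static int demo_hw_fini(void *handle) { (void)handle; return 0; }

static const struct ip_funcs demo_funcs = {
	.name = "demo",
	.hw_init = demo_hw_init,
	.hw_fini = demo_hw_fini,
};

int main(void)
{
	/* suspend/resume can simply reuse the same two hooks */
	printf("%s init: %d\n", demo_funcs.name, demo_funcs.hw_init(NULL));
	printf("%s fini: %d\n", demo_funcs.name, demo_funcs.hw_fini(NULL));
	return 0;
}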
+static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_SDMA,
+	.align_mask = 0xf,
+	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+	.support_64bit_ptrs = true,
+	.vmhub = AMDGPU_MMHUB_0,
+	.get_rptr = sdma_v4_4_2_ring_get_rptr,
+	.get_wptr = sdma_v4_4_2_ring_get_wptr,
+	.set_wptr = sdma_v4_4_2_ring_set_wptr,
+	.emit_frame_size =
+		6 + /* sdma_v4_4_2_ring_emit_hdp_flush */
+		3 + /* hdp invalidate */
+		6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
+		/* sdma_v4_4_2_ring_emit_vm_flush */
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+		10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
+	.emit_ib = sdma_v4_4_2_ring_emit_ib,
+	.emit_fence = sdma_v4_4_2_ring_emit_fence,
+	.emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync,
+	.emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush,
+	.emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush,
+	.test_ring = sdma_v4_4_2_ring_test_ring,
+	.test_ib = sdma_v4_4_2_ring_test_ib,
+	.insert_nop = sdma_v4_4_2_ring_insert_nop,
+	.pad_ib = sdma_v4_4_2_ring_pad_ib,
+	.emit_wreg = sdma_v4_4_2_ring_emit_wreg,
+	.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
+	.type = AMDGPU_RING_TYPE_SDMA,
+	.align_mask = 0xf,
+	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+	.support_64bit_ptrs = true,
+	.vmhub = AMDGPU_MMHUB_0,
+	.get_rptr = sdma_v4_4_2_ring_get_rptr,
+	.get_wptr = sdma_v4_4_2_page_ring_get_wptr,
+	.set_wptr = sdma_v4_4_2_page_ring_set_wptr,
+	.emit_frame_size =
+		6 + /* sdma_v4_4_2_ring_emit_hdp_flush */
+		3 + /* hdp invalidate */
+		6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
+		/* sdma_v4_4_2_ring_emit_vm_flush */
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+		10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
+	.emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
+	.emit_ib = sdma_v4_4_2_ring_emit_ib,
+	.emit_fence = sdma_v4_4_2_ring_emit_fence,
+	.emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync,
+	.emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush,
+	.emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush,
+	.test_ring = sdma_v4_4_2_ring_test_ring,
+	.test_ib = sdma_v4_4_2_ring_test_ib,
+	.insert_nop = sdma_v4_4_2_ring_insert_nop,
+	.pad_ib = sdma_v4_4_2_ring_pad_ib,
+	.emit_wreg = sdma_v4_4_2_ring_emit_wreg,
+	.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
+	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
+static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
+{
+	int i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		adev->sdma.instance[i].ring.funcs = &sdma_v4_4_2_ring_funcs;
+		adev->sdma.instance[i].ring.me = i;
+		if (adev->sdma.has_page_queue) {
+			adev->sdma.instance[i].page.funcs =
+				&sdma_v4_4_2_page_ring_funcs;
+			adev->sdma.instance[i].page.me = i;
+		}
+	}
+}
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_trap_irq_funcs = {
+	.set = sdma_v4_4_2_set_trap_irq_state,
+	.process = sdma_v4_4_2_process_trap_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_illegal_inst_irq_funcs = {
+	.process = sdma_v4_4_2_process_illegal_inst_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_ecc_irq_funcs = {
+	.set = sdma_v4_4_2_set_ecc_irq_state,
+	.process = amdgpu_sdma_process_ecc_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_vm_hole_irq_funcs = {
+	.process = sdma_v4_4_2_process_vm_hole_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_doorbell_invalid_irq_funcs = {
+	.process = sdma_v4_4_2_process_doorbell_invalid_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_pool_timeout_irq_funcs = {
+	.process = sdma_v4_4_2_process_pool_timeout_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_4_2_srbm_write_irq_funcs = {
+	.process = sdma_v4_4_2_process_srbm_write_irq,
+};
+
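The buffer_funcs table below advertises copy_max_bytes = 0x400000 and copy_num_dw = 7, which is what lets TTM size an IB for an arbitrarily large move: one 7-dword COPY_LINEAR packet per 4 MiB chunk, with the last partial chunk rounded up. The sizing calculation as a sketch:

#include <stdio.h>

#define COPY_MAX_BYTES 0x400000u	/* 4 MiB per COPY_LINEAR packet */
#define COPY_NUM_DW    7u		/* dwords per packet */

static unsigned long long copy_ib_dwords(unsigned long long bytes)
{
	/* one packet per chunk, rounding the last partial chunk up */
	unsigned long long packets =
		(bytes + COPY_MAX_BYTES - 1) / COPY_MAX_BYTES;

	return packets * COPY_NUM_DW;
}

int main(void)
{
	printf("64 MiB copy: %llu dwords of IB space\n",
	       copy_ib_dwords(64ull << 20));
	return 0;
}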
+static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->sdma.trap_irq.num_types = adev->sdma.num_instances;
+	adev->sdma.ecc_irq.num_types = adev->sdma.num_instances;
+	adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances;
+	adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances;
+	adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances;
+	adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances;
+
+	adev->sdma.trap_irq.funcs = &sdma_v4_4_2_trap_irq_funcs;
+	adev->sdma.illegal_inst_irq.funcs = &sdma_v4_4_2_illegal_inst_irq_funcs;
+	adev->sdma.ecc_irq.funcs = &sdma_v4_4_2_ecc_irq_funcs;
+	adev->sdma.vm_hole_irq.funcs = &sdma_v4_4_2_vm_hole_irq_funcs;
+	adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_4_2_doorbell_invalid_irq_funcs;
+	adev->sdma.pool_timeout_irq.funcs = &sdma_v4_4_2_pool_timeout_irq_funcs;
+	adev->sdma.srbm_write_irq.funcs = &sdma_v4_4_2_srbm_write_irq_funcs;
+}
+
+/**
+ * sdma_v4_4_2_emit_copy_buffer - copy buffer using the sDMA engine
+ *
+ * @ib: indirect buffer to copy to
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ * @tmz: if a secure copy should be used
+ *
+ * Copy GPU buffers using the DMA engine.
+ * Used by the amdgpu ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+static void sdma_v4_4_2_emit_copy_buffer(struct amdgpu_ib *ib,
+					 uint64_t src_offset,
+					 uint64_t dst_offset,
+					 uint32_t byte_count,
+					 bool tmz)
+{
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
+		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+		SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
+	ib->ptr[ib->length_dw++] = byte_count - 1;
+	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+}
+
+/**
+ * sdma_v4_4_2_emit_fill_buffer - fill buffer using the sDMA engine
+ *
+ * @ib: indirect buffer to fill
+ * @src_data: value to write to buffer
+ * @dst_offset: dst GPU address
+ * @byte_count: number of bytes to xfer
+ *
+ * Fill GPU buffers using the DMA engine.
+ */
+static void sdma_v4_4_2_emit_fill_buffer(struct amdgpu_ib *ib,
+					 uint32_t src_data,
+					 uint64_t dst_offset,
+					 uint32_t byte_count)
+{
+	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
+	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
+	ib->ptr[ib->length_dw++] = src_data;
+	ib->ptr[ib->length_dw++] = byte_count - 1;
+}
+
+static const struct amdgpu_buffer_funcs sdma_v4_4_2_buffer_funcs = {
+	.copy_max_bytes = 0x400000,
+	.copy_num_dw = 7,
+	.emit_copy_buffer = sdma_v4_4_2_emit_copy_buffer,
+
+	.fill_max_bytes = 0x400000,
+	.fill_num_dw = 5,
+	.emit_fill_buffer = sdma_v4_4_2_emit_fill_buffer,
+};
+
+static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev)
+{
+	adev->mman.buffer_funcs = &sdma_v4_4_2_buffer_funcs;
+	if (adev->sdma.has_page_queue)
+		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
+	else
+		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+}
+
+static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs = {
+	.copy_pte_num_dw = 7,
+	.copy_pte = sdma_v4_4_2_vm_copy_pte,
+
+	.write_pte = sdma_v4_4_2_vm_write_pte,
+	.set_pte_pde = sdma_v4_4_2_vm_set_pte_pde,
+};
+
+static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev)
+{
+	struct drm_gpu_scheduler *sched;
+	unsigned i;
+
+	adev->vm_manager.vm_pte_funcs = &sdma_v4_4_2_vm_pte_funcs;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (adev->sdma.has_page_queue)
+			sched = &adev->sdma.instance[i].page.sched;
+		else
+			sched = &adev->sdma.instance[i].ring.sched;
+		adev->vm_manager.vm_pte_scheds[i] = sched;
+	}
+	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
+}
+
+const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = {
+	.type = AMD_IP_BLOCK_TYPE_SDMA,
+	.major = 4,
+	.minor = 4,
+	.rev = 0,
+	.funcs = &sdma_v4_4_2_ip_funcs,
+};
diff --git
a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h new file mode 100644 index 000000000000..4814e8a074d6 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h @@ -0,0 +1,30 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __SDMA_V4_4_2_H__ +#define __SDMA_V4_4_2_H__ + +extern const struct amd_ip_funcs sdma_v4_4_2_ip_funcs; +extern const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 7cd17dda32ce..7d04c39332ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -191,47 +191,6 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode, } } -/* - * Indirect registers accessor - */ -static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg) -{ - unsigned long address, data; - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - return amdgpu_device_indirect_rreg(adev, address, data, reg); -} - -static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) -{ - unsigned long address, data; - - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - amdgpu_device_indirect_wreg(adev, address, data, reg, v); -} - -static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg) -{ - unsigned long address, data; - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - return amdgpu_device_indirect_rreg64(adev, address, data, reg); -} - -static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) -{ - unsigned long address, data; - - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - amdgpu_device_indirect_wreg64(adev, address, data, reg, v); -} - static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg) { unsigned long flags, address, data; @@ -439,8 +398,9 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num, *value = 0; for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) { en = &soc15_allowed_read_registers[i]; - if (adev->reg_offset[en->hwip][en->inst] && - reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + if (!adev->reg_offset[en->hwip][en->inst]) + continue; + 
else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset)) continue; @@ -650,24 +610,6 @@ static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk return 0; } -static void soc15_pcie_gen3_enable(struct amdgpu_device *adev) -{ - if (pci_is_root_bus(adev->pdev->bus)) - return; - - if (amdgpu_pcie_gen2 == 0) - return; - - if (adev->flags & AMD_IS_APU) - return; - - if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | - CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))) - return; - - /* todo */ -} - static void soc15_program_aspm(struct amdgpu_device *adev) { if (!amdgpu_device_should_use_aspm(adev)) @@ -694,11 +636,6 @@ const struct amdgpu_ip_block_version vega10_common_ip_block = .funcs = &soc15_common_ip_funcs, }; -static uint32_t soc15_get_rev_id(struct amdgpu_device *adev) -{ - return adev->nbio.funcs->get_rev_id(adev); -} - static void soc15_reg_base_init(struct amdgpu_device *adev) { /* Set IP register base before any HW register access */ @@ -935,10 +872,10 @@ static int soc15_common_early_init(void *handle) } adev->smc_rreg = NULL; adev->smc_wreg = NULL; - adev->pcie_rreg = &soc15_pcie_rreg; - adev->pcie_wreg = &soc15_pcie_wreg; - adev->pcie_rreg64 = &soc15_pcie_rreg64; - adev->pcie_wreg64 = &soc15_pcie_wreg64; + adev->pcie_rreg = &amdgpu_device_indirect_rreg; + adev->pcie_wreg = &amdgpu_device_indirect_wreg; + adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64; + adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64; adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg; adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg; adev->didt_rreg = &soc15_didt_rreg; @@ -948,7 +885,7 @@ static int soc15_common_early_init(void *handle) adev->se_cac_rreg = &soc15_se_cac_rreg; adev->se_cac_wreg = &soc15_se_cac_wreg; - adev->rev_id = soc15_get_rev_id(adev); + adev->rev_id = amdgpu_device_get_rev_id(adev); adev->external_rev_id = 0xFF; /* TODO: split the GC and PG flags based on the relevant IP version for which * they are relevant. 
@@ -1229,8 +1166,6 @@ static int soc15_common_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* enable pcie gen2/3 link */ - soc15_pcie_gen3_enable(adev); /* enable aspm */ soc15_program_aspm(adev); /* setup nbio registers */ diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 620f7409825d..67580761b44d 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -102,6 +102,59 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 = .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1, }; +/* SRIOV SOC21, not const since data is controlled by host */ +static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = { + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, +}; + +static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = { + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, +}; + +static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = { + .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0), + .codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn0, +}; + +static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = { + .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1), + .codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn1, +}; + +static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = { + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, +}; + +static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = { + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, +}; + +static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn0 = { + .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0), + .codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn0, +}; + +static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn1 = { + .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1), + .codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn1, +}; + static int 
soc21_query_video_codecs(struct amdgpu_device *adev, bool encode, const struct amdgpu_video_codecs **codecs) { @@ -111,62 +164,38 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode, switch (adev->ip_versions[UVD_HWIP][0]) { case IP_VERSION(4, 0, 0): case IP_VERSION(4, 0, 2): - if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) { - if (encode) - *codecs = &vcn_4_0_0_video_codecs_encode_vcn1; - else - *codecs = &vcn_4_0_0_video_codecs_decode_vcn1; + case IP_VERSION(4, 0, 4): + if (amdgpu_sriov_vf(adev)) { + if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) || + !amdgpu_sriov_is_av1_support(adev)) { + if (encode) + *codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn1; + else + *codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn1; + } else { + if (encode) + *codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn0; + else + *codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn0; + } } else { - if (encode) - *codecs = &vcn_4_0_0_video_codecs_encode_vcn0; - else - *codecs = &vcn_4_0_0_video_codecs_decode_vcn0; + if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)) { + if (encode) + *codecs = &vcn_4_0_0_video_codecs_encode_vcn1; + else + *codecs = &vcn_4_0_0_video_codecs_decode_vcn1; + } else { + if (encode) + *codecs = &vcn_4_0_0_video_codecs_encode_vcn0; + else + *codecs = &vcn_4_0_0_video_codecs_decode_vcn0; + } } return 0; default: return -EINVAL; } } -/* - * Indirect registers accessor - */ -static u32 soc21_pcie_rreg(struct amdgpu_device *adev, u32 reg) -{ - unsigned long address, data; - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - return amdgpu_device_indirect_rreg(adev, address, data, reg); -} - -static void soc21_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) -{ - unsigned long address, data; - - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - amdgpu_device_indirect_wreg(adev, address, data, reg, v); -} - -static u64 soc21_pcie_rreg64(struct amdgpu_device *adev, u32 reg) -{ - unsigned long address, data; - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - return amdgpu_device_indirect_rreg64(adev, address, data, reg); -} - -static void soc21_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) -{ - unsigned long address, data; - - address = adev->nbio.funcs->get_pcie_index_offset(adev); - data = adev->nbio.funcs->get_pcie_data_offset(adev); - - amdgpu_device_indirect_wreg64(adev, address, data, reg, v); -} static u32 soc21_didt_rreg(struct amdgpu_device *adev, u32 reg) { @@ -291,9 +320,10 @@ static int soc21_read_register(struct amdgpu_device *adev, u32 se_num, *value = 0; for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) { en = &soc21_allowed_read_registers[i]; - if (adev->reg_offset[en->hwip][en->inst] && - reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] - + en->reg_offset)) + if (!adev->reg_offset[en->hwip][en->inst]) + continue; + else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + + en->reg_offset)) continue; *value = soc21_get_register_value(adev, @@ -410,21 +440,6 @@ static int soc21_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk return 0; } -static void soc21_pcie_gen3_enable(struct amdgpu_device *adev) -{ - if (pci_is_root_bus(adev->pdev->bus)) - return; - - if (amdgpu_pcie_gen2 == 0) - return; - - if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | - 
CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))) - return; - - /* todo */ -} - static void soc21_program_aspm(struct amdgpu_device *adev) { if (!amdgpu_device_should_use_aspm(adev)) @@ -451,11 +466,6 @@ const struct amdgpu_ip_block_version soc21_common_ip_block = .funcs = &soc21_common_ip_funcs, }; -static uint32_t soc21_get_rev_id(struct amdgpu_device *adev) -{ - return adev->nbio.funcs->get_rev_id(adev); -} - static bool soc21_need_full_reset(struct amdgpu_device *adev) { switch (adev->ip_versions[GC_HWIP][0]) { @@ -580,10 +590,10 @@ static int soc21_common_early_init(void *handle) adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; adev->smc_rreg = NULL; adev->smc_wreg = NULL; - adev->pcie_rreg = &soc21_pcie_rreg; - adev->pcie_wreg = &soc21_pcie_wreg; - adev->pcie_rreg64 = &soc21_pcie_rreg64; - adev->pcie_wreg64 = &soc21_pcie_wreg64; + adev->pcie_rreg = &amdgpu_device_indirect_rreg; + adev->pcie_wreg = &amdgpu_device_indirect_wreg; + adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64; + adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64; adev->pciep_rreg = amdgpu_device_pcie_port_rreg; adev->pciep_wreg = amdgpu_device_pcie_port_wreg; @@ -596,7 +606,7 @@ static int soc21_common_early_init(void *handle) adev->asic_funcs = &soc21_asic_funcs; - adev->rev_id = soc21_get_rev_id(adev); + adev->rev_id = amdgpu_device_get_rev_id(adev); adev->external_rev_id = 0xff; switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(11, 0, 0): @@ -728,8 +738,23 @@ static int soc21_common_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev)) { xgpu_nv_mailbox_get_irq(adev); + if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) || + !amdgpu_sriov_is_av1_support(adev)) { + amdgpu_virt_update_sriov_video_codec(adev, + sriov_vcn_4_0_0_video_codecs_encode_array_vcn1, + ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1), + sriov_vcn_4_0_0_video_codecs_decode_array_vcn1, + ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1)); + } else { + amdgpu_virt_update_sriov_video_codec(adev, + sriov_vcn_4_0_0_video_codecs_encode_array_vcn0, + ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0), + sriov_vcn_4_0_0_video_codecs_decode_array_vcn0, + ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0)); + } + } return 0; } @@ -753,8 +778,6 @@ static int soc21_common_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* enable pcie gen2/3 link */ - soc21_pcie_gen3_enable(adev); /* enable aspm */ soc21_program_aspm(adev); /* setup nbio registers */ diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h index 25eaf4af5fcf..c6dfd433fec7 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.h @@ -31,9 +31,9 @@ /* number of umc instance with memory map register access */ #define UMC_V8_10_UMC_INSTANCE_NUM 2 -/* Total channel instances for all umc nodes */ +/* Total channel instances for all available umc nodes */ #define UMC_V8_10_TOTAL_CHANNEL_NUM(adev) \ - (UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->umc.node_inst_num) + (UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->gmc.num_umc) /* UMC regiser per channel offset */ #define UMC_V8_10_PER_CHANNEL_OFFSET 0x400 diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index b0b0e69c6a94..223e7dfe4618 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -225,6 +225,10 @@ static int vcn_v2_5_sw_init(void *handle) if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode; + r = amdgpu_vcn_ras_sw_init(adev); + if (r) + return r; + return 0; } @@ -2031,6 +2035,4 @@ static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev) default: break; } - - amdgpu_vcn_set_ras_funcs(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index 43d587404c3e..720ab36f9c92 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -181,6 +181,10 @@ static int vcn_v4_0_sw_init(void *handle) if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) adev->vcn.pause_dpg_mode = vcn_v4_0_pause_dpg_mode; + r = amdgpu_vcn_ras_sw_init(adev); + if (r) + return r; + return 0; } @@ -2123,6 +2127,4 @@ static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev) default: break; } - - amdgpu_vcn_set_ras_funcs(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c index 1706081d054d..827e2768f867 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -321,7 +321,8 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev) /* psp firmware won't program IH_CHICKEN for aldebaran * driver needs to program it properly according to * MC_SPACE type in IH_RB_CNTL */ - if (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0)) { + if ((adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 0)) || + (adev->ip_versions[OSSSYS_HWIP][0] == IP_VERSION(4, 4, 2))) { ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN); if (adev->irq.ih.use_bus_addr) { ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, @@ -551,12 +552,14 @@ static int vega20_ih_sw_init(void *handle) adev->irq.ih1.use_doorbell = true; adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; - r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true); - if (r) - return r; + if (adev->ip_versions[OSSSYS_HWIP][0] != IP_VERSION(4, 4, 2)) { + r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true); + if (r) + return r; - adev->irq.ih2.use_doorbell = true; - adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1; + adev->irq.ih2.use_doorbell = true; + adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1; + } /* initialize ih control registers offset */ vega20_ih_init_register_offset(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 12ef782eb478..531f173ade2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -81,10 +81,6 @@ #include "mxgpu_vi.h" #include "amdgpu_dm.h" -#if IS_ENABLED(CONFIG_X86) -#include <asm/intel-family.h> -#endif - #define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6 #define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L @@ -1105,24 +1101,6 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) return 0; } -static void vi_pcie_gen3_enable(struct amdgpu_device *adev) -{ - if (pci_is_root_bus(adev->pdev->bus)) - return; - - if (amdgpu_pcie_gen2 == 0) - return; - - if (adev->flags & AMD_IS_APU) - return; - - if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | - CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))) - return; - - /* todo */ -} - static void vi_enable_aspm(struct amdgpu_device *adev) { u32 data, orig; @@ -1138,24 
+1116,13 @@ static void vi_enable_aspm(struct amdgpu_device *adev) WREG32_PCIE(ixPCIE_LC_CNTL, data); } -static bool aspm_support_quirk_check(void) -{ -#if IS_ENABLED(CONFIG_X86) - struct cpuinfo_x86 *c = &cpu_data(0); - - return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE); -#else - return true; -#endif -} - static void vi_program_aspm(struct amdgpu_device *adev) { u32 data, data1, orig; bool bL1SS = false; bool bClkReqSupport = true; - if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check()) + if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk()) return; if (adev->flags & AMD_IS_APU || @@ -1743,8 +1710,6 @@ static int vi_common_hw_init(void *handle) /* move the golden regs per IP block */ vi_init_golden_registers(adev); - /* enable pcie gen2/3 link */ - vi_pcie_gen3_enable(adev); /* enable aspm */ vi_program_aspm(adev); /* enable the doorbell aperture */ |
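The vi.c hunk above folds the removed Alder Lake ASPM check into the shared amdgpu_device_aspm_support_quirk() helper declared earlier in amdgpu.h. A user-space sketch of the removed check's logic; the family/model constants follow asm/intel-family.h, where INTEL_FAM6_ALDERLAKE is 0x97 (treat the exact value here as an assumption):

#include <stdbool.h>
#include <stdio.h>

#define X86_FAMILY_INTEL6	6
#define INTEL_FAM6_ALDERLAKE	0x97	/* assumed value from asm/intel-family.h */

/* report no ASPM support when running on an Alder Lake host */
static bool aspm_support_quirk_check(int family, int model)
{
	return !(family == X86_FAMILY_INTEL6 && model == INTEL_FAM6_ALDERLAKE);
}

int main(void)
{
	printf("alderlake: %d\n", aspm_support_quirk_check(6, 0x97));
	printf("other:     %d\n", aspm_support_quirk_check(6, 0x8e));
	return 0;
}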