-rw-r--r--Documentation/admin-guide/sysctl/kernel.rst14
-rw-r--r--Documentation/arch/x86/index.rst1
-rw-r--r--Documentation/arch/x86/microcode.rst14
-rw-r--r--Documentation/arch/x86/shstk.rst179
-rw-r--r--Documentation/bpf/kfuncs.rst23
-rw-r--r--Documentation/bpf/llvm_reloc.rst18
-rw-r--r--Documentation/bpf/map_hash.rst53
-rw-r--r--Documentation/bpf/map_lru_hash_update.dot172
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip.yaml7
-rw-r--r--Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.yaml14
-rw-r--r--Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml2
-rw-r--r--Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml2
-rw-r--r--Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml7
-rw-r--r--Documentation/devicetree/bindings/input/atmel,maxtouch.yaml7
-rw-r--r--Documentation/devicetree/bindings/input/pwm-vibrator.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/cypress,tt21000.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov2685.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.txt15
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.yaml36
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml3
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml4
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml3
-rw-r--r--Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.txt26
-rw-r--r--Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml52
-rw-r--r--Documentation/devicetree/bindings/power/reset/qcom,pon.yaml4
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq256xx.yaml18
-rw-r--r--Documentation/devicetree/bindings/reset/oxnas,reset.txt32
-rw-r--r--Documentation/devicetree/bindings/soc/rockchip/grf.yaml2
-rw-r--r--Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml39
-rw-r--r--Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml3
-rw-r--r--Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml6
-rw-r--r--Documentation/devicetree/bindings/usb/qcom,dwc3.yaml21
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml2
-rw-r--r--Documentation/filesystems/f2fs.rst16
-rw-r--r--Documentation/filesystems/proc.rst1
-rw-r--r--Documentation/hwmon/asus_ec_sensors.rst1
-rw-r--r--Documentation/hwmon/index.rst2
-rw-r--r--Documentation/hwmon/oxp-sensors.rst2
-rw-r--r--Documentation/mm/arch_pgtable_helpers.rst9
-rw-r--r--MAINTAINERS18
-rw-r--r--arch/alpha/include/asm/pgtable.h6
-rw-r--r--arch/arc/include/asm/hugepage.h2
-rw-r--r--arch/arc/include/asm/pgtable-bits-arcv2.h7
-rw-r--r--arch/arm/arm-soc-for-next-contents.txt217
-rw-r--r--arch/arm/boot/dts/exynos4.dtsi4
-rw-r--r--arch/arm/boot/dts/exynos4210-trats.dts2
-rw-r--r--arch/arm/boot/dts/exynos4210-universal_c210.dts3
-rw-r--r--arch/arm/boot/dts/exynos4412-odroid-common.dtsi2
-rw-r--r--arch/arm/boot/dts/s5pv210-pinctrl.dtsi2
-rw-r--r--arch/arm/boot/dts/s5pv210-smdkv210.dts8
-rw-r--r--arch/arm/boot/dts/s5pv210.dtsi12
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca5s.dts1
-rw-r--r--arch/arm/include/asm/pgtable-3level.h7
-rw-r--r--arch/arm/include/asm/pgtable.h2
-rw-r--r--arch/arm/kernel/signal.c2
-rw-r--r--arch/arm/kernel/unwind.c25
-rw-r--r--arch/arm/mach-s3c/Kconfig.s3c64xx6
-rw-r--r--arch/arm/mach-sa1100/jornada720_ssp.c5
-rw-r--r--arch/arm64/boot/dts/arm/foundation-v8.dtsi1
-rw-r--r--arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts1
-rw-r--r--arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts1
-rw-r--r--arch/arm64/boot/dts/renesas/Makefile2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779a0.dtsi50
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g044.dtsi123
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g044c2-smarc-cru-csi-ov5645.dtso21
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g044l2-smarc-cru-csi-ov5645.dtso2
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g054.dtsi203
l---------arch/arm64/boot/dts/renesas/r9a07g054l2-smarc-cru-csi-ov5645.dtso1
-rw-r--r--arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi79
-rw-r--r--arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi79
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3308.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock64.dts14
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts18
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi29
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts27
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi484
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts112
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s.dtsi34
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi4
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-sm-k26-revA.dts4
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts3
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts4
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts4
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts4
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts4
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts4
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts4
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts4
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp.dtsi12
-rw-r--r--arch/arm64/include/asm/kvm_pgtable.h1
-rw-r--r--arch/arm64/include/asm/pgtable.h9
-rw-r--r--arch/arm64/kernel/signal.c2
-rw-r--r--arch/arm64/kernel/signal32.c2
-rw-r--r--arch/arm64/kvm/fpsimd.c26
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h12
-rw-r--r--arch/arm64/kvm/hyp/pgtable.c41
-rw-r--r--arch/arm64/kvm/inject_fault.c2
-rw-r--r--arch/arm64/kvm/vmid.c7
-rw-r--r--arch/arm64/mm/trans_pgd.c4
-rw-r--r--arch/csky/include/asm/pgtable.h2
-rw-r--r--arch/hexagon/include/asm/pgtable.h2
-rw-r--r--arch/ia64/include/asm/pgtable.h2
-rw-r--r--arch/loongarch/include/asm/pgtable.h4
-rw-r--r--arch/m68k/include/asm/mcf_pgtable.h2
-rw-r--r--arch/m68k/include/asm/motorola_pgtable.h6
-rw-r--r--arch/m68k/include/asm/sun3_pgtable.h6
-rw-r--r--arch/microblaze/include/asm/pgtable.h2
-rw-r--r--arch/mips/include/asm/pgtable.h6
-rw-r--r--arch/nios2/include/asm/pgtable.h2
-rw-r--r--arch/openrisc/include/asm/pgtable.h2
-rw-r--r--arch/parisc/include/asm/pgtable.h6
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h4
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h2
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-8xx.h2
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h2
-rw-r--r--arch/powerpc/kernel/isa-bridge.c5
-rw-r--r--arch/powerpc/kernel/tau_6xx.c2
-rw-r--r--arch/powerpc/platforms/86xx/Kconfig1
-rw-r--r--arch/powerpc/platforms/Kconfig1
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c6
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c3
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110.dtsi7
-rw-r--r--arch/riscv/include/asm/pgtable.h6
-rw-r--r--arch/s390/include/asm/hugetlb.h4
-rw-r--r--arch/s390/include/asm/pgtable.h14
-rw-r--r--arch/s390/mm/pageattr.c4
-rw-r--r--arch/sh/include/asm/pgtable_32.h10
-rw-r--r--arch/sh/kernel/cpu/sh2/probe.c2
-rw-r--r--arch/sparc/include/asm/pgtable_32.h2
-rw-r--r--arch/sparc/include/asm/pgtable_64.h6
-rw-r--r--arch/sparc/kernel/signal32.c2
-rw-r--r--arch/sparc/kernel/signal_64.c2
-rw-r--r--arch/um/include/asm/pgtable.h2
-rw-r--r--arch/x86/Kconfig24
-rw-r--r--arch/x86/Kconfig.assembler5
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl1
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl2
-rw-r--r--arch/x86/hyperv/hv_vtl.c2
-rw-r--r--arch/x86/include/asm/cpufeatures.h2
-rw-r--r--arch/x86/include/asm/disabled-features.h16
-rw-r--r--arch/x86/include/asm/fpu/api.h9
-rw-r--r--arch/x86/include/asm/fpu/regset.h7
-rw-r--r--arch/x86/include/asm/fpu/sched.h3
-rw-r--r--arch/x86/include/asm/fpu/types.h16
-rw-r--r--arch/x86/include/asm/fpu/xstate.h6
-rw-r--r--arch/x86/include/asm/idtentry.h2
-rw-r--r--arch/x86/include/asm/mmu_context.h2
-rw-r--r--arch/x86/include/asm/pgtable.h322
-rw-r--r--arch/x86/include/asm/pgtable_types.h56
-rw-r--r--arch/x86/include/asm/processor.h8
-rw-r--r--arch/x86/include/asm/shstk.h38
-rw-r--r--arch/x86/include/asm/special_insns.h13
-rw-r--r--arch/x86/include/asm/tlbflush.h3
-rw-r--r--arch/x86/include/asm/trap_pf.h2
-rw-r--r--arch/x86/include/asm/traps.h12
-rw-r--r--arch/x86/include/uapi/asm/mman.h4
-rw-r--r--arch/x86/include/uapi/asm/prctl.h12
-rw-r--r--arch/x86/kernel/Makefile4
-rw-r--r--arch/x86/kernel/cet.c152
-rw-r--r--arch/x86/kernel/cpu/common.c35
-rw-r--r--arch/x86/kernel/cpu/cpuid-deps.c1
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c9
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c9
-rw-r--r--arch/x86/kernel/cpu/proc.c23
-rw-r--r--arch/x86/kernel/fpu/core.c54
-rw-r--r--arch/x86/kernel/fpu/regset.c81
-rw-r--r--arch/x86/kernel/fpu/xstate.c90
-rw-r--r--arch/x86/kernel/idt.c2
-rw-r--r--arch/x86/kernel/process.c21
-rw-r--r--arch/x86/kernel/process_64.c8
-rw-r--r--arch/x86/kernel/ptrace.c12
-rw-r--r--arch/x86/kernel/shstk.c499
-rw-r--r--arch/x86/kernel/signal.c1
-rw-r--r--arch/x86/kernel/signal_32.c2
-rw-r--r--arch/x86/kernel/signal_64.c8
-rw-r--r--arch/x86/kernel/sys_x86_64.c6
-rw-r--r--arch/x86/kernel/traps.c87
-rw-r--r--arch/x86/mm/fault.c22
-rw-r--r--arch/x86/mm/pat/set_memory.c4
-rw-r--r--arch/x86/mm/pgtable.c38
-rw-r--r--arch/x86/xen/enlighten_pv.c2
-rw-r--r--arch/x86/xen/mmu_pv.c2
-rw-r--r--arch/x86/xen/xen-asm.S2
-rw-r--r--arch/xtensa/include/asm/pgtable.h2
-rw-r--r--drivers/base/regmap/Kconfig13
-rw-r--r--drivers/base/regmap/regmap-irq.c5
-rw-r--r--drivers/char/tpm/tpm-chip.c4
-rw-r--r--drivers/char/tpm/tpm-interface.c10
-rw-r--r--drivers/char/tpm/tpm_tis_core.c43
-rw-r--r--drivers/clk/microchip/Kconfig4
-rw-r--r--drivers/clk/microchip/clk-pic32mzda.c2
-rw-r--r--drivers/counter/Kconfig6
-rw-r--r--drivers/counter/stm32-timer-cnt.c3
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c2
-rw-r--r--drivers/dma-buf/heaps/system_heap.c5
-rw-r--r--drivers/extcon/Kconfig1
-rw-r--r--drivers/extcon/extcon-axp288.c2
-rw-r--r--drivers/extcon/extcon-palmas.c1
-rw-r--r--drivers/extcon/extcon-qcom-spmi-misc.c4
-rw-r--r--drivers/extcon/extcon-usbc-tusb320.c153
-rw-r--r--drivers/extcon/extcon.c368
-rw-r--r--drivers/extcon/extcon.h8
-rw-r--r--drivers/firmware/arm_ffa/bus.c19
-rw-r--r--drivers/firmware/arm_ffa/driver.c9
-rw-r--r--drivers/firmware/arm_scmi/raw_mode.c2
-rw-r--r--drivers/firmware/sysfb_simplefb.c4
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c258
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c216
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c285
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c136
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c2834
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c581
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v10_1.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nvd.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c73
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c31
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c30
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c207
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dsc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c264
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c252
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c68
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c97
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c92
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/abm.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c29
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h18
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h40
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c6
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c16
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h3
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c42
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c4
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c43
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h3
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h1
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c6
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c12
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c36
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/ast/ast_main.c9
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c16
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/Makefile4
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c113
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.h4
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c4
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.h1
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c259
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c179
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c481
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c52
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c2963
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.h50
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h221
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c422
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c80
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c1096
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h36
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c583
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.h36
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c92
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.c135
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_reset.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_trace.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h54
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dkl_phy.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c320
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c57
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c35
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c107
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_load_detect.c225
-rw-r--r--drivers/gpu/drm/i915/display/intel_load_detect.h20
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_verify.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c113
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c261
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr_regs.h207
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_sprite_uapi.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c255
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c64
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.h10
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c314
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.h12
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c35
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c1
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c38
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c1
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c47
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h7
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c588
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h4
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c4
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h451
-rw-r--r--drivers/gpu/drm/i915/i915_reg_defs.h57
-rw-r--r--drivers/gpu/drm/i915/intel_clock_gating.c5
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c29
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0012.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c2
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c6
-rw-r--r--drivers/greybus/connection.c4
-rw-r--r--drivers/greybus/svc.c2
-rw-r--r--drivers/hv/vmbus_drv.c5
-rw-r--r--drivers/hwmon/ad7414.c2
-rw-r--r--drivers/hwmon/ad7418.c2
-rw-r--r--drivers/hwmon/adc128d818.c2
-rw-r--r--drivers/hwmon/adm1021.c2
-rw-r--r--drivers/hwmon/adm1025.c2
-rw-r--r--drivers/hwmon/adm1026.c2
-rw-r--r--drivers/hwmon/adm1029.c2
-rw-r--r--drivers/hwmon/adm1031.c2
-rw-r--r--drivers/hwmon/adm1177.c2
-rw-r--r--drivers/hwmon/adm9240.c2
-rw-r--r--drivers/hwmon/ads7828.c2
-rw-r--r--drivers/hwmon/adt7410.c2
-rw-r--r--drivers/hwmon/adt7411.c2
-rw-r--r--drivers/hwmon/adt7462.c2
-rw-r--r--drivers/hwmon/adt7470.c2
-rw-r--r--drivers/hwmon/adt7475.c10
-rw-r--r--drivers/hwmon/aht10.c2
-rw-r--r--drivers/hwmon/amc6821.c2
-rw-r--r--drivers/hwmon/asb100.c2
-rw-r--r--drivers/hwmon/asc7621.c2
-rw-r--r--drivers/hwmon/asus-ec-sensors.c30
-rw-r--r--drivers/hwmon/atxp1.c2
-rw-r--r--drivers/hwmon/dme1737.c2
-rw-r--r--drivers/hwmon/ds1621.c2
-rw-r--r--drivers/hwmon/ds620.c2
-rw-r--r--drivers/hwmon/emc1403.c2
-rw-r--r--drivers/hwmon/emc2103.c2
-rw-r--r--drivers/hwmon/emc2305.c2
-rw-r--r--drivers/hwmon/emc6w201.c2
-rw-r--r--drivers/hwmon/f75375s.c2
-rw-r--r--drivers/hwmon/fschmd.c2
-rw-r--r--drivers/hwmon/ftsteutates.c2
-rw-r--r--drivers/hwmon/g760a.c2
-rw-r--r--drivers/hwmon/g762.c2
-rw-r--r--drivers/hwmon/gl518sm.c2
-rw-r--r--drivers/hwmon/gl520sm.c2
-rw-r--r--drivers/hwmon/hih6130.c2
-rw-r--r--drivers/hwmon/ina209.c2
-rw-r--r--drivers/hwmon/ina238.c2
-rw-r--r--drivers/hwmon/ina2xx.c2
-rw-r--r--drivers/hwmon/ina3221.c2
-rw-r--r--drivers/hwmon/it87.c75
-rw-r--r--drivers/hwmon/jc42.c2
-rw-r--r--drivers/hwmon/lineage-pem.c2
-rw-r--r--drivers/hwmon/lm63.c2
-rw-r--r--drivers/hwmon/lm73.c2
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/lm77.c2
-rw-r--r--drivers/hwmon/lm78.c2
-rw-r--r--drivers/hwmon/lm80.c2
-rw-r--r--drivers/hwmon/lm83.c2
-rw-r--r--drivers/hwmon/lm85.c2
-rw-r--r--drivers/hwmon/lm87.c2
-rw-r--r--drivers/hwmon/lm90.c2
-rw-r--r--drivers/hwmon/lm92.c2
-rw-r--r--drivers/hwmon/lm93.c2
-rw-r--r--drivers/hwmon/lm95234.c2
-rw-r--r--drivers/hwmon/lm95241.c2
-rw-r--r--drivers/hwmon/lm95245.c2
-rw-r--r--drivers/hwmon/ltc2945.c2
-rw-r--r--drivers/hwmon/ltc2947-i2c.c2
-rw-r--r--drivers/hwmon/ltc2990.c2
-rw-r--r--drivers/hwmon/ltc2992.c2
-rw-r--r--drivers/hwmon/ltc4151.c2
-rw-r--r--drivers/hwmon/ltc4215.c2
-rw-r--r--drivers/hwmon/ltc4222.c2
-rw-r--r--drivers/hwmon/ltc4245.c2
-rw-r--r--drivers/hwmon/ltc4260.c2
-rw-r--r--drivers/hwmon/ltc4261.c2
-rw-r--r--drivers/hwmon/max127.c2
-rw-r--r--drivers/hwmon/max16065.c2
-rw-r--r--drivers/hwmon/max1619.c2
-rw-r--r--drivers/hwmon/max1668.c2
-rw-r--r--drivers/hwmon/max31730.c2
-rw-r--r--drivers/hwmon/max31760.c2
-rw-r--r--drivers/hwmon/max31790.c2
-rw-r--r--drivers/hwmon/max6620.c2
-rw-r--r--drivers/hwmon/max6621.c2
-rw-r--r--drivers/hwmon/max6639.c2
-rw-r--r--drivers/hwmon/max6642.c2
-rw-r--r--drivers/hwmon/max6650.c2
-rw-r--r--drivers/hwmon/max6697.c2
-rw-r--r--drivers/hwmon/mc34vr500.c2
-rw-r--r--drivers/hwmon/mcp3021.c2
-rw-r--r--drivers/hwmon/nct6775-i2c.c2
-rw-r--r--drivers/hwmon/nct7802.c2
-rw-r--r--drivers/hwmon/nct7904.c2
-rw-r--r--drivers/hwmon/occ/p8_i2c.c2
-rw-r--r--drivers/hwmon/oxp-sensors.c32
-rw-r--r--drivers/hwmon/pcf8591.c2
-rw-r--r--drivers/hwmon/pmbus/acbel-fsg032.c2
-rw-r--r--drivers/hwmon/pmbus/adm1266.c2
-rw-r--r--drivers/hwmon/pmbus/adm1275.c2
-rw-r--r--drivers/hwmon/pmbus/bel-pfe.c2
-rw-r--r--drivers/hwmon/pmbus/bpa-rs600.c2
-rw-r--r--drivers/hwmon/pmbus/delta-ahe50dc-fan.c2
-rw-r--r--drivers/hwmon/pmbus/dps920ab.c2
-rw-r--r--drivers/hwmon/pmbus/fsp-3y.c2
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c2
-rw-r--r--drivers/hwmon/pmbus/inspur-ipsps.c2
-rw-r--r--drivers/hwmon/pmbus/ir35221.c2
-rw-r--r--drivers/hwmon/pmbus/ir36021.c2
-rw-r--r--drivers/hwmon/pmbus/ir38064.c2
-rw-r--r--drivers/hwmon/pmbus/irps5401.c2
-rw-r--r--drivers/hwmon/pmbus/isl68137.c2
-rw-r--r--drivers/hwmon/pmbus/lm25066.c2
-rw-r--r--drivers/hwmon/pmbus/lt7182s.c2
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c2
-rw-r--r--drivers/hwmon/pmbus/ltc3815.c2
-rw-r--r--drivers/hwmon/pmbus/max15301.c2
-rw-r--r--drivers/hwmon/pmbus/max16064.c2
-rw-r--r--drivers/hwmon/pmbus/max16601.c2
-rw-r--r--drivers/hwmon/pmbus/max20730.c2
-rw-r--r--drivers/hwmon/pmbus/max20751.c2
-rw-r--r--drivers/hwmon/pmbus/max31785.c2
-rw-r--r--drivers/hwmon/pmbus/max34440.c2
-rw-r--r--drivers/hwmon/pmbus/max8688.c2
-rw-r--r--drivers/hwmon/pmbus/mp2888.c2
-rw-r--r--drivers/hwmon/pmbus/mp2975.c2
-rw-r--r--drivers/hwmon/pmbus/mp5023.c2
-rw-r--r--drivers/hwmon/pmbus/mpq7932.c2
-rw-r--r--drivers/hwmon/pmbus/pim4328.c2
-rw-r--r--drivers/hwmon/pmbus/pli1209bc.c2
-rw-r--r--drivers/hwmon/pmbus/pm6764tr.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus.c2
-rw-r--r--drivers/hwmon/pmbus/pxe1610.c2
-rw-r--r--drivers/hwmon/pmbus/q54sj108a2.c2
-rw-r--r--drivers/hwmon/pmbus/stpddc60.c2
-rw-r--r--drivers/hwmon/pmbus/tda38640.c2
-rw-r--r--drivers/hwmon/pmbus/tps40422.c2
-rw-r--r--drivers/hwmon/pmbus/tps53679.c2
-rw-r--r--drivers/hwmon/pmbus/tps546d24.c2
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c2
-rw-r--r--drivers/hwmon/pmbus/ucd9200.c2
-rw-r--r--drivers/hwmon/pmbus/xdpe12284.c2
-rw-r--r--drivers/hwmon/pmbus/xdpe152c4.c2
-rw-r--r--drivers/hwmon/pmbus/zl6100.c2
-rw-r--r--drivers/hwmon/powr1220.c2
-rw-r--r--drivers/hwmon/sbrmi.c2
-rw-r--r--drivers/hwmon/sbtsi_temp.c2
-rw-r--r--drivers/hwmon/sht21.c2
-rw-r--r--drivers/hwmon/sht3x.c2
-rw-r--r--drivers/hwmon/sht4x.c2
-rw-r--r--drivers/hwmon/shtc1.c2
-rw-r--r--drivers/hwmon/smm665.c2
-rw-r--r--drivers/hwmon/smsc47m192.c2
-rw-r--r--drivers/hwmon/stts751.c2
-rw-r--r--drivers/hwmon/tc654.c2
-rw-r--r--drivers/hwmon/tc74.c2
-rw-r--r--drivers/hwmon/thmc50.c2
-rw-r--r--drivers/hwmon/tmp102.c2
-rw-r--r--drivers/hwmon/tmp103.c2
-rw-r--r--drivers/hwmon/tmp108.c2
-rw-r--r--drivers/hwmon/tmp401.c2
-rw-r--r--drivers/hwmon/tmp421.c2
-rw-r--r--drivers/hwmon/tmp464.c2
-rw-r--r--drivers/hwmon/tmp513.c2
-rw-r--r--drivers/hwmon/tps23861.c2
-rw-r--r--drivers/hwmon/w83773g.c2
-rw-r--r--drivers/hwmon/w83781d.c2
-rw-r--r--drivers/hwmon/w83791d.c2
-rw-r--r--drivers/hwmon/w83792d.c2
-rw-r--r--drivers/hwmon/w83793.c2
-rw-r--r--drivers/hwmon/w83795.c2
-rw-r--r--drivers/hwmon/w83l785ts.c2
-rw-r--r--drivers/hwmon/w83l786ng.c2
-rw-r--r--drivers/iio/accel/st_accel_core.c4
-rw-r--r--drivers/iio/adc/ad4130.c12
-rw-r--r--drivers/iio/adc/ad7192.c8
-rw-r--r--drivers/iio/adc/imx93_adc.c7
-rw-r--r--drivers/iio/adc/mt6370-adc.c53
-rw-r--r--drivers/iio/adc/mxs-lradc-adc.c10
-rw-r--r--drivers/iio/adc/palmas_gpadc.c10
-rw-r--r--drivers/iio/adc/stm32-adc.c61
-rw-r--r--drivers/iio/addac/ad74413r.c2
-rw-r--r--drivers/iio/industrialio-gts-helper.c42
-rw-r--r--drivers/iio/light/rohm-bu27034.c14
-rw-r--r--drivers/iio/light/vcnl4035.c3
-rw-r--r--drivers/iio/magnetometer/tmag5273.c5
-rw-r--r--drivers/infiniband/hw/hfi1/init.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c2
-rw-r--r--drivers/input/Kconfig2
-rw-r--r--drivers/input/input.c8
-rw-r--r--drivers/input/misc/drv260x.c56
-rw-r--r--drivers/input/misc/pwm-vibra.c36
-rw-r--r--drivers/input/misc/uinput.c34
-rw-r--r--drivers/input/tests/input_test.c6
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c85
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c3
-rw-r--r--drivers/input/touchscreen/cyttsp5.c84
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c1
-rw-r--r--drivers/input/touchscreen/ili210x.c36
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c3
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c6
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c41
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-dma.c21
-rw-r--r--drivers/memory/renesas-rpc-if.c53
-rw-r--r--drivers/mmc/core/block.c5
-rw-r--r--drivers/mmc/host/sdhci-cadence.c8
-rw-r--r--drivers/mux/Kconfig2
-rw-r--r--drivers/mux/mmio.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c3
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h35
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c116
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c5
-rw-r--r--drivers/net/ieee802154/ca8210.c3
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c6
-rw-r--r--drivers/net/mdio/mdio-mvusb.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.h4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.h15
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/debugfs.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h37
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c3
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c457
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h5
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c59
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c121
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h72
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c24
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c84
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h76
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c86
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c283
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h12
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h49
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c173
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.h15
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c1775
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c86
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c11
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h19
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c2
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb2.c15
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c720
-rw-r--r--drivers/phy/microchip/sparx5_serdes.c426
-rw-r--r--drivers/phy/microchip/sparx5_serdes.h1
-rw-r--r--drivers/phy/microchip/sparx5_serdes_regs.h106
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c45
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c6
-rw-r--r--drivers/pinctrl/pinctrl-amd.c50
-rw-r--r--drivers/pinctrl/pinctrl-amd.h1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c2
-rw-r--r--drivers/power/reset/gpio-restart.c7
-rw-r--r--drivers/power/supply/ab8500_btemp.c6
-rw-r--r--drivers/power/supply/ab8500_fg.c6
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c2
-rw-r--r--drivers/power/supply/bq24190_charger.c1
-rw-r--r--drivers/power/supply/bq256xx_charger.c28
-rw-r--r--drivers/power/supply/bq25890_charger.c5
-rw-r--r--drivers/power/supply/bq27xxx_battery.c181
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c3
-rw-r--r--drivers/power/supply/cros_peripheral_charger.c25
-rw-r--r--drivers/power/supply/max17042_battery.c7
-rw-r--r--drivers/power/supply/power_supply_hwmon.c2
-rw-r--r--drivers/power/supply/power_supply_leds.c5
-rw-r--r--drivers/power/supply/rk817_charger.c2
-rw-r--r--drivers/power/supply/sc27xx_fuel_gauge.c9
-rw-r--r--drivers/power/supply/twl4030_madc_battery.c8
-rw-r--r--drivers/regulator/88pg86x.c2
-rw-r--r--drivers/regulator/act8865-regulator.c2
-rw-r--r--drivers/regulator/ad5398.c2
-rw-r--r--drivers/regulator/da9121-regulator.c2
-rw-r--r--drivers/regulator/da9210-regulator.c2
-rw-r--r--drivers/regulator/da9211-regulator.c2
-rw-r--r--drivers/regulator/fan53555.c2
-rw-r--r--drivers/regulator/fan53880.c2
-rw-r--r--drivers/regulator/isl6271a-regulator.c2
-rw-r--r--drivers/regulator/isl9305.c2
-rw-r--r--drivers/regulator/lp3971.c2
-rw-r--r--drivers/regulator/lp3972.c2
-rw-r--r--drivers/regulator/lp872x.c2
-rw-r--r--drivers/regulator/lp8755.c2
-rw-r--r--drivers/regulator/ltc3589.c2
-rw-r--r--drivers/regulator/ltc3676.c2
-rw-r--r--drivers/regulator/max1586.c2
-rw-r--r--drivers/regulator/max20086-regulator.c2
-rw-r--r--drivers/regulator/max20411-regulator.c2
-rw-r--r--drivers/regulator/max77826-regulator.c2
-rw-r--r--drivers/regulator/max8649.c2
-rw-r--r--drivers/regulator/max8660.c2
-rw-r--r--drivers/regulator/max8893.c2
-rw-r--r--drivers/regulator/max8952.c2
-rw-r--r--drivers/regulator/max8973-regulator.c2
-rw-r--r--drivers/regulator/mcp16502.c2
-rw-r--r--drivers/regulator/mp5416.c2
-rw-r--r--drivers/regulator/mp8859.c2
-rw-r--r--drivers/regulator/mp886x.c2
-rw-r--r--drivers/regulator/mpq7920.c2
-rw-r--r--drivers/regulator/mt6311-regulator.c2
-rw-r--r--drivers/regulator/pca9450-regulator.c2
-rw-r--r--drivers/regulator/pf8x00-regulator.c2
-rw-r--r--drivers/regulator/pfuze100-regulator.c2
-rw-r--r--drivers/regulator/pv88060-regulator.c2
-rw-r--r--drivers/regulator/pv88080-regulator.c2
-rw-r--r--drivers/regulator/pv88090-regulator.c2
-rw-r--r--drivers/regulator/rpi-panel-attiny-regulator.c2
-rw-r--r--drivers/regulator/rt4801-regulator.c2
-rw-r--r--drivers/regulator/rt5190a-regulator.c2
-rw-r--r--drivers/regulator/rt5759-regulator.c2
-rw-r--r--drivers/regulator/rt6160-regulator.c2
-rw-r--r--drivers/regulator/rt6190-regulator.c2
-rw-r--r--drivers/regulator/rt6245-regulator.c2
-rw-r--r--drivers/regulator/rtmv20-regulator.c2
-rw-r--r--drivers/regulator/rtq2134-regulator.c2
-rw-r--r--drivers/regulator/rtq6752-regulator.c2
-rw-r--r--drivers/regulator/slg51000-regulator.c2
-rw-r--r--drivers/regulator/sy8106a-regulator.c2
-rw-r--r--drivers/regulator/sy8824x.c2
-rw-r--r--drivers/regulator/sy8827n.c2
-rw-r--r--drivers/regulator/tps51632-regulator.c2
-rw-r--r--drivers/regulator/tps62360-regulator.c2
-rw-r--r--drivers/regulator/tps6286x-regulator.c2
-rw-r--r--drivers/regulator/tps65023-regulator.c2
-rw-r--r--drivers/regulator/tps65132-regulator.c2
-rw-r--r--drivers/reset/Kconfig6
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/reset-ath79.c4
-rw-r--r--drivers/reset/reset-axs10x.c4
-rw-r--r--drivers/reset/reset-brcmstb-rescal.c4
-rw-r--r--drivers/reset/reset-hsdk.c7
-rw-r--r--drivers/reset/reset-lpc18xx.c4
-rw-r--r--drivers/reset/reset-meson-audio-arb.c7
-rw-r--r--drivers/reset/reset-meson.c4
-rw-r--r--drivers/reset/reset-oxnas.c114
-rw-r--r--drivers/reset/starfive/Kconfig3
-rw-r--r--drivers/reset/sti/Kconfig4
-rw-r--r--drivers/reset/sti/Makefile4
-rw-r--r--drivers/reset/sti/reset-syscfg.c18
-rw-r--r--drivers/scsi/NCR5380.c5
-rw-r--r--drivers/scsi/hosts.c12
-rw-r--r--drivers/scsi/libiscsi.c5
-rw-r--r--drivers/scsi/libsas/sas_expander.c124
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c30
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c26
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c44
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c392
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c32
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h1
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c126
-rw-r--r--drivers/scsi/qedf/qedf_main.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h21
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c256
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c33
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c64
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c130
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/smartpqi/Kconfig2
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h6
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c284
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c34
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.h2
-rw-r--r--drivers/soc/microchip/mpfs-sys-controller.c6
-rw-r--r--drivers/soc/rockchip/dtpm.c54
-rw-r--r--drivers/soc/rockchip/pm_domains.c160
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c4
-rw-r--r--drivers/soundwire/bus.c12
-rw-r--r--drivers/soundwire/qcom.c387
-rw-r--r--drivers/spi/spi-dw-dma.c24
-rw-r--r--drivers/spi/spi-fsl-lpspi.c6
-rw-r--r--drivers/spi/spi-geni-qcom.c2
-rw-r--r--drivers/spi/spi-qcom-qspi.c218
-rw-r--r--drivers/spi/spi-s3c64xx.c81
-rw-r--r--drivers/staging/octeon/TODO1
-rw-r--r--drivers/ufs/core/ufshcd.c30
-rw-r--r--drivers/ufs/host/ufs-mediatek.c3
-rw-r--r--drivers/vhost/net.c11
-rw-r--r--drivers/vhost/vdpa.c34
-rw-r--r--drivers/vhost/vhost.c18
-rw-r--r--drivers/vhost/vhost.h8
-rw-r--r--drivers/virt/acrn/ioreq.c4
-rw-r--r--drivers/xen/pvcalls-back.c4
-rw-r--r--fs/9p/v9fs.h2
-rw-r--r--fs/aio.c2
-rw-r--r--fs/btrfs/backref.c17
-rw-r--r--fs/btrfs/block-rsv.c3
-rw-r--r--fs/btrfs/ctree.c38
-rw-r--r--fs/btrfs/file-item.c5
-rw-r--r--fs/btrfs/ioctl.c4
-rw-r--r--fs/btrfs/super.c7
-rw-r--r--fs/btrfs/volumes.c1
-rw-r--r--fs/btrfs/zoned.c6
-rw-r--r--fs/buffer.c70
-rw-r--r--fs/cifs/cifsfs.c2
-rw-r--r--fs/cifs/smb2ops.c2
-rw-r--r--fs/cifs/smb2pdu.c3
-rw-r--r--fs/ecryptfs/crypto.c10
-rw-r--r--fs/ecryptfs/keystore.c2
-rw-r--r--fs/ecryptfs/read_write.c8
-rw-r--r--fs/ext2/Makefile5
-rw-r--r--fs/ext2/ext2.h1
-rw-r--r--fs/ext2/file.c126
-rw-r--r--fs/ext2/inode.c58
-rw-r--r--fs/ext2/trace.c6
-rw-r--r--fs/ext2/trace.h94
-rw-r--r--fs/ext4/balloc.c43
-rw-r--r--fs/ext4/ext4.h15
-rw-r--r--fs/ext4/extents_status.c30
-rw-r--r--fs/ext4/fsync.c33
-rw-r--r--fs/ext4/hash.c7
-rw-r--r--fs/ext4/ialloc.c12
-rw-r--r--fs/ext4/inline.c3
-rw-r--r--fs/ext4/inode.c2
-rw-r--r--fs/ext4/mballoc.c78
-rw-r--r--fs/ext4/mmp.c30
-rw-r--r--fs/ext4/namei.c53
-rw-r--r--fs/ext4/super.c21
-rw-r--r--fs/ext4/xattr.c5
-rw-r--r--fs/f2fs/checkpoint.c7
-rw-r--r--fs/f2fs/data.c5
-rw-r--r--fs/f2fs/f2fs.h20
-rw-r--r--fs/f2fs/file.c11
-rw-r--r--fs/f2fs/gc.c2
-rw-r--r--fs/f2fs/node.c13
-rw-r--r--fs/f2fs/super.c134
-rw-r--r--fs/f2fs/sysfs.c39
-rw-r--r--fs/fs-writeback.c16
-rw-r--r--fs/fuse/dev.c4
-rw-r--r--fs/fuse/dir.c2
-rw-r--r--fs/fuse/inode.c3
-rw-r--r--fs/hugetlbfs/inode.c5
-rw-r--r--fs/ksmbd/connection.c3
-rw-r--r--fs/ksmbd/mgmt/tree_connect.c11
-rw-r--r--fs/ksmbd/mgmt/tree_connect.h3
-rw-r--r--fs/ksmbd/oplock.c2
-rw-r--r--fs/ksmbd/smb2misc.c5
-rw-r--r--fs/ksmbd/smb2pdu.c5
-rw-r--r--fs/ksmbd/smb_common.c4
-rw-r--r--fs/lockd/svc.c2
-rw-r--r--fs/nfsd/nfsctl.c25
-rw-r--r--fs/notify/inotify/inotify_fsnotify.c11
-rw-r--r--fs/ntfs/mft.c38
-rw-r--r--fs/ntfs3/attrib.c2
-rw-r--r--fs/ntfs3/attrlist.c7
-rw-r--r--fs/ntfs3/bitmap.c10
-rw-r--r--fs/ntfs3/file.c6
-rw-r--r--fs/ntfs3/frecord.c58
-rw-r--r--fs/ntfs3/fslog.c40
-rw-r--r--fs/ntfs3/fsntfs.c99
-rw-r--r--fs/ntfs3/index.c20
-rw-r--r--fs/ntfs3/inode.c23
-rw-r--r--fs/ntfs3/lznt.c6
-rw-r--r--fs/ntfs3/namei.c31
-rw-r--r--fs/ntfs3/ntfs.h117
-rw-r--r--fs/ntfs3/ntfs_fs.h31
-rw-r--r--fs/ntfs3/record.c14
-rw-r--r--fs/ntfs3/run.c4
-rw-r--r--fs/ntfs3/super.c279
-rw-r--r--fs/ntfs3/xattr.c20
-rw-r--r--fs/pipe.c39
-rw-r--r--fs/proc/array.c6
-rw-r--r--fs/proc/task_mmu.c3
-rw-r--r--fs/splice.c11
-rw-r--r--fs/xfs/libxfs/xfs_ag.c19
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c5
-rw-r--r--fs/xfs/scrub/bmap.c4
-rw-r--r--fs/xfs/scrub/common.c26
-rw-r--r--fs/xfs/scrub/common.h2
-rw-r--r--fs/xfs/scrub/fscounters.c13
-rw-r--r--fs/xfs/scrub/scrub.c2
-rw-r--r--fs/xfs/scrub/scrub.h1
-rw-r--r--fs/xfs/scrub/trace.h1
-rw-r--r--fs/xfs/xfs_bmap_util.c4
-rw-r--r--fs/xfs/xfs_icache.c40
-rw-r--r--fs/xfs/xfs_iomap.c5
-rw-r--r--fs/xfs/xfs_mount.h3
-rw-r--r--fs/xfs/xfs_super.c3
-rw-r--r--include/asm-generic/hugetlb.h4
-rw-r--r--include/drm/display/drm_dp.h8
-rw-r--r--include/drm/display/drm_dp_helper.h5
-rw-r--r--include/drm/ttm/ttm_tt.h2
-rw-r--r--include/linux/arm_ffa.h1
-rw-r--r--include/linux/bpf.h2
-rw-r--r--include/linux/bpf_verifier.h27
-rw-r--r--include/linux/buffer_head.h4
-rw-r--r--include/linux/cgroup.h1
-rw-r--r--include/linux/cpuset.h12
-rw-r--r--include/linux/i8042.h1
-rw-r--r--include/linux/iio/iio-gts-helper.h2
-rw-r--r--include/linux/io_uring.h10
-rw-r--r--include/linux/lockdep.h14
-rw-r--r--include/linux/lockdep_types.h1
-rw-r--r--include/linux/maple_tree.h129
-rw-r--r--include/linux/memcontrol.h5
-rw-r--r--include/linux/mm.h79
-rw-r--r--include/linux/mman.h4
-rw-r--r--include/linux/mmdebug.h14
-rw-r--r--include/linux/pgtable.h14
-rw-r--r--include/linux/pipe_fs_i.h8
-rw-r--r--include/linux/platform_data/spi-s3c64xx.h1
-rw-r--r--include/linux/power/bq27xxx_battery.h4
-rw-r--r--include/linux/proc_fs.h2
-rw-r--r--include/linux/regmap.h3
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/shrinker.h13
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/linux/swap.h1
-rw-r--r--include/linux/syscalls.h6
-rw-r--r--include/linux/tpm.h1
-rw-r--r--include/linux/workqueue.h2
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/net/cfg802154.h3
-rw-r--r--include/net/ieee802154_netdev.h20
-rw-r--r--include/net/xsk_buff_pool.h2
-rw-r--r--include/sound/core.h2
-rw-r--r--include/trace/events/afs.h475
-rw-r--r--include/uapi/asm-generic/siginfo.h3
-rw-r--r--include/uapi/asm-generic/unistd.h5
-rw-r--r--include/uapi/drm/amdgpu_drm.h22
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/fuse.h3
-rw-r--r--include/uapi/linux/mman.h14
-rw-r--r--include/uapi/sound/asound.h4
-rw-r--r--init/Kconfig10
-rw-r--r--ipc/ipc_sysctl.c37
-rw-r--r--ipc/mq_sysctl.c36
-rw-r--r--ipc/shm.c2
-rw-r--r--kernel/bpf/helpers.c123
-rw-r--r--kernel/bpf/syscall.c3
-rw-r--r--kernel/bpf/verifier.c746
-rw-r--r--kernel/cgroup/cgroup.c4
-rw-r--r--kernel/cgroup/cpuset.c244
-rw-r--r--kernel/cgroup/rstat.c26
-rw-r--r--kernel/kthread.c4
-rw-r--r--kernel/locking/lockdep.c28
-rw-r--r--kernel/sched/core.c41
-rw-r--r--kernel/sched/deadline.c67
-rw-r--r--kernel/sched/sched.h2
-rw-r--r--kernel/sys_ni.c2
-rw-r--r--kernel/trace/bpf_trace.c4
-rw-r--r--kernel/ucount.c16
-rw-r--r--kernel/workqueue.c2
-rw-r--r--lib/Kconfig.debug10
-rw-r--r--lib/debugobjects.c15
-rw-r--r--lib/iov_iter.c2
-rw-r--r--lib/maple_tree.c1176
-rw-r--r--lib/test_maple_tree.c863
-rw-r--r--mm/compaction.c12
-rw-r--r--mm/debug.c9
-rw-r--r--mm/debug_vm_pgtable.c16
-rw-r--r--mm/dmapool.c10
-rw-r--r--mm/filemap.c171
-rw-r--r--mm/gup.c152
-rw-r--r--mm/huge_memory.c11
-rw-r--r--mm/hugetlb.c4
-rw-r--r--mm/internal.h30
-rw-r--r--mm/kfence/kfence.h2
-rw-r--r--mm/memcontrol.c192
-rw-r--r--mm/memory.c8
-rw-r--r--mm/migrate.c51
-rw-r--r--mm/migrate_device.c2
-rw-r--r--mm/mmap.c179
-rw-r--r--mm/mprotect.c2
-rw-r--r--mm/nommu.c4
-rw-r--r--mm/page_alloc.c2
-rw-r--r--mm/shmem.c17
-rw-r--r--mm/shrinker_debug.c15
-rw-r--r--mm/userfaultfd.c2
-rw-r--r--mm/util.c2
-rw-r--r--mm/vmscan.c5
-rw-r--r--mm/workingset.c150
-rw-r--r--mm/zsmalloc.c36
-rw-r--r--mm/zswap.c16
-rw-r--r--net/bluetooth/hci_conn.c77
-rw-r--r--net/core/filter.c2
-rw-r--r--net/core/skbuff.c4
-rw-r--r--net/ieee802154/header_ops.c36
-rw-r--r--net/ieee802154/nl802154.c13
-rw-r--r--net/mac802154/ieee802154_i.h21
-rw-r--r--net/mac802154/main.c2
-rw-r--r--net/mac802154/rx.c70
-rw-r--r--net/mac802154/scan.c68
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c10
-rw-r--r--net/sunrpc/svc.c17
-rw-r--r--net/sunrpc/svcsock.c16
-rw-r--r--net/xdp/xsk_buff_pool.c7
-rw-r--r--net/xfrm/xfrm_device.c2
-rw-r--r--net/xfrm/xfrm_interface_core.c54
-rw-r--r--net/xfrm/xfrm_policy.c9
-rw-r--r--net/xfrm/xfrm_user.c1
-rw-r--r--samples/bpf/tcp_basertt_kern.c2
-rw-r--r--scripts/spelling.txt22
-rw-r--r--security/apparmor/crypto.c10
-rw-r--r--security/apparmor/domain.c2
-rw-r--r--security/apparmor/file.c2
-rw-r--r--security/apparmor/include/task.h2
-rw-r--r--security/apparmor/policy.c7
-rw-r--r--security/apparmor/policy_unpack.c11
-rw-r--r--security/apparmor/policy_unpack_test.c13
-rw-r--r--security/apparmor/secid.c3
-rw-r--r--security/apparmor/task.c5
-rw-r--r--security/selinux/Makefile19
-rw-r--r--security/selinux/avc.c20
-rw-r--r--security/selinux/hooks.c4
-rw-r--r--security/selinux/ima.c2
-rw-r--r--security/selinux/include/audit.h2
-rw-r--r--security/selinux/include/avc.h3
-rw-r--r--security/selinux/include/ima.h2
-rw-r--r--security/selinux/selinuxfs.c4
-rw-r--r--security/selinux/ss/avtab.c2
-rw-r--r--security/selinux/ss/avtab.h2
-rw-r--r--security/selinux/ss/context.h1
-rw-r--r--security/selinux/ss/policydb.c2
-rw-r--r--security/selinux/ss/services.c40
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c16
-rw-r--r--sound/pci/emu10k1/emufx.c7
-rw-r--r--sound/pci/emu10k1/emumixer.c68
-rw-r--r--sound/pci/emu10k1/emupcm.c2
-rw-r--r--sound/pci/emu10k1/emuproc.c6
-rw-r--r--sound/pci/emu10k1/io.c25
-rw-r--r--sound/pci/emu10k1/timer.c6
-rw-r--r--sound/pci/hda/patch_realtek.c3
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c7
-rw-r--r--sound/soc/dwc/dwc-i2s.c4
-rw-r--r--tools/bpf/bpftool/link.c10
-rw-r--r--tools/bpf/bpftool/map.c12
-rw-r--r--tools/counter/.gitignore2
-rw-r--r--tools/counter/Makefile1
-rw-r--r--tools/include/nolibc/nolibc.h2
-rw-r--r--tools/lib/bpf/bpf_tracing.h3
-rw-r--r--tools/lib/bpf/btf_dump.c22
-rw-r--r--tools/perf/Documentation/perf-script.txt2
-rw-r--r--tools/perf/arch/common.c18
-rw-r--r--tools/perf/builtin-script.c60
-rw-r--r--tools/perf/util/dso.c33
-rw-r--r--tools/perf/util/dso.h8
-rw-r--r--tools/perf/util/evsel_fprintf.c16
-rw-r--r--tools/perf/util/evsel_fprintf.h1
-rw-r--r--tools/perf/util/map.c32
-rw-r--r--tools/perf/util/map.h1
-rw-r--r--tools/power/cpupower/lib/powercap.c23
-rw-r--r--tools/power/cpupower/utils/idle_monitor/mperf_monitor.c31
-rw-r--r--tools/testing/radix-tree/linux/init.h1
-rw-r--r--tools/testing/radix-tree/maple.c164
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.aarch6482
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.s390x1
-rw-r--r--tools/testing/selftests/bpf/Makefile2
-rw-r--r--tools/testing/selftests/bpf/bpf_kfuncs.h6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arg_parsing.c68
-rw-r--r--tools/testing/selftests/bpf/prog_tests/dynptr.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c53
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier.c2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_misc.h4
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_fail.c307
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_success.c336
-rw-r--r--tools/testing/selftests/bpf/progs/iters.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func1.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_task_under_cgroup.c51
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_subprog_precision.c536
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_hw_metadata.c4
-rw-r--r--tools/testing/selftests/bpf/test_progs.c37
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.c207
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.h3
-rw-r--r--tools/testing/selftests/bpf/verifier/precise.c143
-rw-r--r--tools/testing/selftests/bpf/veristat.c9
-rw-r--r--tools/testing/selftests/bpf/xdp_hw_metadata.c47
-rw-r--r--tools/testing/selftests/bpf/xdp_metadata.h1
-rw-r--r--tools/testing/selftests/cachestat/.gitignore2
-rw-r--r--tools/testing/selftests/cachestat/Makefile8
-rw-r--r--tools/testing/selftests/cachestat/test_cachestat.c258
-rw-r--r--tools/testing/selftests/clone3/clone3.c5
-rw-r--r--tools/testing/selftests/ftrace/Makefile3
-rwxr-xr-xtools/testing/selftests/ftrace/ftracetest63
-rwxr-xr-xtools/testing/selftests/ftrace/ftracetest-ktap8
-rw-r--r--tools/testing/selftests/media_tests/video_device_test.c111
-rw-r--r--tools/testing/selftests/prctl/set-anon-vma-name-test.c2
-rw-r--r--tools/testing/selftests/sgx/Makefile1
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_clock_getres.c4
-rw-r--r--tools/testing/selftests/x86/Makefile2
-rw-r--r--tools/testing/selftests/x86/test_shadow_stack.c695
-rw-r--r--tools/virtio/ringtest/.gitignore6
1178 files changed, 34437 insertions, 10348 deletions
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index d85d90f5d000..193e40d5f257 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -568,6 +568,9 @@ default (``MSGMNB``).
``msgmni`` is the maximum number of IPC queues. 32000 by default
(``MSGMNI``).
+All of these parameters are set per ipc namespace. The maximum number of bytes
+in POSIX message queues is limited by ``RLIMIT_MSGQUEUE``. This limit is
+respected hierarchically in each user namespace.
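
For illustration, ``RLIMIT_MSGQUEUE`` is the standard resource limit
manipulated with ``getrlimit(2)``/``setrlimit(2)``; a minimal sketch of
inspecting and lowering it for a process tree::

	#include <stdio.h>
	#include <sys/resource.h>

	int main(void)
	{
		struct rlimit rl;

		/* RLIMIT_MSGQUEUE caps the bytes a user may queue in POSIX mqueues. */
		if (getrlimit(RLIMIT_MSGQUEUE, &rl))
			return 1;
		printf("soft=%llu hard=%llu\n",
		       (unsigned long long)rl.rlim_cur,
		       (unsigned long long)rl.rlim_max);

		/* Lower the soft limit; child processes inherit it. */
		rl.rlim_cur = 64 * 1024;
		return setrlimit(RLIMIT_MSGQUEUE, &rl);
	}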
msg_next_id, sem_next_id, and shm_next_id (System V IPC)
========================================================
@@ -1228,15 +1231,20 @@ are doing anyway :)
shmall
======
-This parameter sets the total amount of shared memory pages that
-can be used system wide. Hence, ``shmall`` should always be at least
-``ceil(shmmax/PAGE_SIZE)``.
+This parameter sets the total amount of shared memory pages that can be used
+inside an ipc namespace. Shared memory pages are counted separately for each
+ipc namespace, and the count is not inherited. Hence, ``shmall`` should always
+be at least ``ceil(shmmax/PAGE_SIZE)``.
If you are not sure what the default ``PAGE_SIZE`` is on your Linux
system, you can run the following command::
# getconf PAGE_SIZE
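
The required minimum can also be computed programmatically; a small sketch
(assuming the usual ``/proc/sys/kernel/shmmax`` location)::

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned long long shmmax, page = sysconf(_SC_PAGESIZE);
		FILE *f = fopen("/proc/sys/kernel/shmmax", "r");

		if (!f || fscanf(f, "%llu", &shmmax) != 1)
			return 1;
		fclose(f);

		/* ceil(shmmax / PAGE_SIZE) */
		printf("shmall must be at least %llu pages\n",
		       (shmmax + page - 1) / page);
		return 0;
	}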
+To reduce or disable the ability to allocate shared memory, create a new ipc
+namespace, set this parameter to the required value, and prohibit the creation
+of new ipc namespaces in the current user namespace; alternatively, cgroups
+can be used.
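
A hedged sketch of that flow (it assumes the per-namespace ``shmall`` sysctl
resolves to the caller's ipc namespace and that the caller holds the required
privilege there; run as root or combine with a user namespace)::

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		FILE *f;

		/* Enter a fresh ipc namespace with its own shm accounting. */
		if (unshare(CLONE_NEWIPC))
			return 1;

		/* Restrict (or set to 0 to disable) shared memory here. */
		f = fopen("/proc/sys/kernel/shmall", "w");
		if (!f || fprintf(f, "0\n") < 0)
			return 1;
		fclose(f);
		return 0;
	}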
shmmax
======
diff --git a/Documentation/arch/x86/index.rst b/Documentation/arch/x86/index.rst
index c73d133fd37c..8ac64d7de4dc 100644
--- a/Documentation/arch/x86/index.rst
+++ b/Documentation/arch/x86/index.rst
@@ -22,6 +22,7 @@ x86-specific Documentation
mtrr
pat
intel-hfi
+ shstk
iommu
intel_txt
amd-memory-encryption
diff --git a/Documentation/arch/x86/microcode.rst b/Documentation/arch/x86/microcode.rst
index b627c6f36bcf..d5ef9184030c 100644
--- a/Documentation/arch/x86/microcode.rst
+++ b/Documentation/arch/x86/microcode.rst
@@ -208,6 +208,20 @@ Basically there is no way to declare a new microcode update suitable
for late-loading. This is another one of the problems that caused late
loading not to be enabled by default.
+AMD
+---
+
+Late loading on AMD does not have the concurrency issues described
+above: when loading is attempted on one hardware thread of a core (T0),
+its sibling thread (T1) is quiesced and does not execute instructions.
+Therefore, even if a higher priority interrupt or a fault happens, the
+whole core will see it either before the microcode patch has been
+applied or after. In either case, T0 and T1 will have the same microcode
+revision and nothing intermediate.
+
+In addition, microcode patches which modify software-visible features
+like CPUID bits, MSRs, chicken bits, etc are not released for late
+loading.
+
Builtin microcode
=================
diff --git a/Documentation/arch/x86/shstk.rst b/Documentation/arch/x86/shstk.rst
new file mode 100644
index 000000000000..60260e809baf
--- /dev/null
+++ b/Documentation/arch/x86/shstk.rst
@@ -0,0 +1,179 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================================================
+Control-flow Enforcement Technology (CET) Shadow Stack
+======================================================
+
+CET Background
+==============
+
+Control-flow Enforcement Technology (CET) covers several related x86 processor
+features that provide protection against control flow hijacking attacks. CET
+can protect both applications and the kernel.
+
+CET introduces shadow stack and indirect branch tracking (IBT). A shadow stack
+is a secondary stack allocated from memory which cannot be directly modified by
+applications. When executing a CALL instruction, the processor pushes the
+return address to both the normal stack and the shadow stack. Upon
+function return, the processor pops the shadow stack copy and compares it
+to the normal stack copy. If the two differ, the processor raises a
+control-protection fault. IBT verifies indirect CALL/JMP targets are intended
+as marked by the compiler with 'ENDBR' opcodes. Not all CPUs have both Shadow
+Stack and Indirect Branch Tracking. Today in the 64-bit kernel, only userspace
+shadow stack and kernel IBT are supported.
+
+Requirements to use Shadow Stack
+================================
+
+To use userspace shadow stack you need HW that supports it, a kernel
+configured with it and userspace libraries compiled with it.
+
+The kernel Kconfig option is X86_USER_SHADOW_STACK. When compiled in, shadow
+stacks can be disabled at runtime with the kernel parameter: nousershstk.
+
+To build a user shadow stack enabled kernel, Binutils v2.29 or LLVM v6 or later
+are required.
+
+At run time, /proc/cpuinfo shows CET features if the processor supports
+CET. "user_shstk" means that userspace shadow stack is supported on the current
+kernel and HW.
+
+Application Enabling
+====================
+
+An application's CET capability is marked in its ELF note and can be verified
+from readelf/llvm-readelf output::
+
+ readelf -n <application> | grep -a SHSTK
+ properties: x86 feature: SHSTK
+
+The kernel does not process these application markers directly. Applications
+or loaders must enable CET features using the interface described in the
+section below. Typically this would be done in the dynamic loader or static
+runtime objects, as is the case in GLIBC.
+
+Enabling arch_prctl()'s
+=======================
+
+ELF features should be enabled by the loader using the below arch_prctl's. They
+are only supported in 64 bit user applications. These operate on the features
+on a per-thread basis. The enablement status is inherited on clone, so if the
+feature is enabled on the first thread, it will propagate to all the threads
+in an app.
+
+arch_prctl(ARCH_SHSTK_ENABLE, unsigned long feature)
+ Enable a single feature specified in 'feature'. Can only operate on
+ one feature at a time.
+
+arch_prctl(ARCH_SHSTK_DISABLE, unsigned long feature)
+ Disable a single feature specified in 'feature'. Can only operate on
+ one feature at a time.
+
+arch_prctl(ARCH_SHSTK_LOCK, unsigned long features)
+ Lock in features at their current enabled or disabled status. 'features'
+ is a mask of all features to lock. All bits set are processed, unset bits
+ are ignored. The mask is ORed with the existing value. So any feature bits
+ set here cannot be enabled or disabled afterwards.
+
+arch_prctl(ARCH_SHSTK_UNLOCK, unsigned long features)
+ Unlock features. 'features' is a mask of all features to unlock. All
+ bits set are processed, unset bits are ignored. Only works via ptrace.
+
+arch_prctl(ARCH_SHSTK_STATUS, unsigned long addr)
+ Copy the currently enabled features to the address passed in addr. The
+ features are described using the same bits as are passed into the other
+ arch_prctl()'s 'features' arguments.
+
+The return values are as follows. On success, return 0. On error, errno can
+be::
+
+ -EPERM if any of the passed features are locked.
+ -ENOTSUPP if the feature is not supported by the hardware or
+ kernel.
+ -EINVAL invalid arguments (non-existent feature, etc.)
+ -EFAULT if the information could not be copied back to userspace
+
+The supported feature bits are::
+
+ ARCH_SHSTK_SHSTK - Shadow stack
+ ARCH_SHSTK_WRSS - WRSS
+
+Currently shadow stack and WRSS are supported via this interface. WRSS
+can only be enabled with shadow stack, and is automatically disabled
+if shadow stack is disabled.
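+
+For illustration only, a minimal userspace sketch (not part of the kernel
+sources; it assumes the uapi ARCH_SHSTK_* constants are available and that
+the libc has no arch_prctl() wrapper, so raw syscall(2) is used) could
+enable and then lock shadow stack for the current thread like this::
+
+    #include <asm/prctl.h>      /* ARCH_SHSTK_* constants */
+    #include <sys/syscall.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+            /* Enable shadow stack for this thread. Real loaders do this
+             * very early, because frames entered before this point must
+             * not be returned through afterwards. */
+            if (syscall(SYS_arch_prctl, ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK))
+                    return 1;   /* not supported, or the feature is locked */
+
+            /* Lock the current status so it cannot be changed later. */
+            syscall(SYS_arch_prctl, ARCH_SHSTK_LOCK, ARCH_SHSTK_SHSTK);
+
+            _exit(0);           /* avoid returning through pre-enable frames */
+    }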
+
+Proc Status
+===========
+To check if an application is actually running with shadow stack, the
+user can read /proc/$PID/status. It will report "wrss" or "shstk"
+depending on what is enabled. The lines look like this::
+
+ x86_Thread_features: shstk wrss
+ x86_Thread_features_locked: shstk wrss
+
+Implementation of the Shadow Stack
+==================================
+
+Shadow Stack Size
+-----------------
+
+A task's shadow stack is allocated from memory to a fixed size of
+MIN(RLIMIT_STACK, 4 GB). In other words, the shadow stack is allocated to
+the maximum size of the normal stack, but capped to 4 GB. In the case
+of the clone3 syscall, there is a stack size passed in, and the shadow stack
+uses this instead of the rlimit.
+
+Signal
+------
+
+The main program and its signal handlers use the same shadow stack. Because
+the shadow stack stores only return addresses, a large shadow stack covers
+the condition that both the program stack and the signal alternate stack run
+out.
+
+When a signal happens, the old pre-signal state is pushed on the stack. When
+shadow stack is enabled, the shadow stack specific state is pushed onto the
+shadow stack. Today this is only the old SSP (shadow stack pointer), pushed
+in a special format with bit 63 set. On sigreturn this old SSP token is
+verified and restored by the kernel. The kernel will also push the normal
+restorer address to the shadow stack to help userspace avoid a shadow stack
+violation on the sigreturn path that goes through the restorer.
+
+So the shadow stack signal frame format is as follows::
+
+ |1...old SSP| - Pointer to old pre-signal ssp in sigframe token format
+ (bit 63 set to 1)
+ | ...| - Other state may be added in the future
+
+
+32 bit ABI signals are not supported in shadow stack processes. Linux prevents
+32 bit execution while shadow stack is enabled by allocating shadow stacks
+outside of the 32 bit address space. When execution enters 32 bit mode, either
+via far call or returning to userspace, a #GP is generated by the hardware,
+which will be delivered to the process as a segfault. When transitioning to
+userspace the register state will be as if the userspace ip being returned to
+caused the segfault.
+
+Fork
+----
+
+The shadow stack's vma has the VM_SHADOW_STACK flag set; its PTEs are required
+to be read-only and dirty. When a shadow stack PTE is not RO and dirty, a
+shadow stack access triggers a page fault with the shadow stack access bit set
+in the page fault error code.
+
+When a task forks a child, its shadow stack PTEs are copied and both the
+parent's and the child's shadow stack PTEs are cleared of the dirty bit.
+Upon the next shadow stack access, the resulting shadow stack page fault
+is handled by page copy/re-use.
+
+When a pthread child is created, the kernel allocates a new shadow stack
+for the new thread. New shadow stack creation behaves like mmap() with respect
+to ASLR behavior. Similarly, on thread exit the thread's shadow stack is
+disabled.
+
+Exec
+----
+
+On exec, shadow stack features are disabled by the kernel, at which point
+userspace can choose to re-enable or lock them.
diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst
index ea2516374d92..7a3d9de5f315 100644
--- a/Documentation/bpf/kfuncs.rst
+++ b/Documentation/bpf/kfuncs.rst
@@ -100,7 +100,7 @@ Hence, whenever a constant scalar argument is accepted by a kfunc which is not a
size parameter, and the value of the constant matters for program safety, __k
suffix should be used.
-2.2.2 __uninit Annotation
+2.2.3 __uninit Annotation
-------------------------
This annotation is used to indicate that the argument will be treated as
@@ -117,6 +117,27 @@ Here, the dynptr will be treated as an uninitialized dynptr. Without this
annotation, the verifier will reject the program if the dynptr passed in is
not initialized.
+2.2.4 __opt Annotation
+-------------------------
+
+This annotation is used to indicate that the buffer associated with an __sz or __szk
+argument may be null. If the function is passed a nullptr in place of the buffer,
+the verifier will not check that the length is appropriate for the buffer. The kfunc
+is responsible for checking if this buffer is null before using it.
+
+An example is given below::
+
+ __bpf_kfunc void *bpf_dynptr_slice(..., void *buffer__opt, u32 buffer__szk)
+ {
+ ...
+ }
+
+Here, the buffer may be null. If it is not null, it must be at least of size
+buffer__szk. Either way, the returned buffer is either NULL or of size
+buffer__szk. Without this annotation, the verifier will reject the program if
+a null pointer is passed in with a nonzero size.
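+
+A kfunc body might therefore guard its accesses along the lines of the
+following sketch (purely illustrative; the real bpf_dynptr_slice() logic
+differs)::
+
+    __bpf_kfunc void *bpf_dynptr_slice(..., void *buffer__opt, u32 buffer__szk)
+    {
+            if (buffer__opt) {
+                    /* Safe: the verifier ensured at least buffer__szk bytes. */
+                    ...
+            } else {
+                    /* No buffer was supplied; take a fallback path. */
+                    ...
+            }
+    }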
+
+
.. _BPF_kfunc_nodef:
2.3 Using an existing kernel function
diff --git a/Documentation/bpf/llvm_reloc.rst b/Documentation/bpf/llvm_reloc.rst
index ca8957d5b671..e4a777a6a3a2 100644
--- a/Documentation/bpf/llvm_reloc.rst
+++ b/Documentation/bpf/llvm_reloc.rst
@@ -48,7 +48,7 @@ the code with ``llvm-objdump -dr test.o``::
14: 0f 10 00 00 00 00 00 00 r0 += r1
15: 95 00 00 00 00 00 00 00 exit
-There are four relations in the above for four ``LD_imm64`` instructions.
+There are four relocations in the above for four ``LD_imm64`` instructions.
The following ``llvm-readelf -r test.o`` shows the binary values of the four
relocations::
@@ -79,14 +79,16 @@ The following is the symbol table with ``llvm-readelf -s test.o``::
The 6th entry is global variable ``g1`` with value 0.
Similarly, the second relocation is at ``.text`` offset ``0x18``, instruction 3,
-for global variable ``g2`` which has a symbol value 4, the offset
-from the start of ``.data`` section.
-
-The third and fourth relocations refers to static variables ``l1``
-and ``l2``. From ``.rel.text`` section above, it is not clear
-which symbols they really refers to as they both refers to
+has a type of ``R_BPF_64_64`` and refers to entry 7 in the symbol table.
+The second relocation resolves to global variable ``g2`` which has a symbol
+value 4. The symbol value represents the offset from the start of ``.data``
+section where the initial value of the global variable ``g2`` is stored.
+
+The third and fourth relocations refer to static variables ``l1``
+and ``l2``. From the ``.rel.text`` section above, it is not clear
+to which symbols they really refer as they both refer to
symbol table entry 4, symbol ``sec``, which has ``STT_SECTION`` type
-and represents a section. So for static variable or function,
+and represents a section. So for a static variable or function,
the section offset is written to the original insn
buffer, which is called ``A`` (addend). Looking at
above insn ``7`` and ``11``, they have section offset ``8`` and ``12``.
diff --git a/Documentation/bpf/map_hash.rst b/Documentation/bpf/map_hash.rst
index 8669426264c6..d2343952f2cb 100644
--- a/Documentation/bpf/map_hash.rst
+++ b/Documentation/bpf/map_hash.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0-only
.. Copyright (C) 2022 Red Hat, Inc.
+.. Copyright (C) 2022-2023 Isovalent, Inc.
===============================================
BPF_MAP_TYPE_HASH, with PERCPU and LRU Variants
@@ -29,7 +30,16 @@ will automatically evict the least recently used entries when the hash
table reaches capacity. An LRU hash maintains an internal LRU list that
is used to select elements for eviction. This internal LRU list is
shared across CPUs but it is possible to request a per CPU LRU list with
-the ``BPF_F_NO_COMMON_LRU`` flag when calling ``bpf_map_create``.
+the ``BPF_F_NO_COMMON_LRU`` flag when calling ``bpf_map_create``. The
+following table outlines the properties of LRU maps depending on the
+map type and the flags used to create the map.
+
+======================== ========================= ================================
+Flag ``BPF_MAP_TYPE_LRU_HASH`` ``BPF_MAP_TYPE_LRU_PERCPU_HASH``
+======================== ========================= ================================
+**BPF_F_NO_COMMON_LRU** Per-CPU LRU, global map Per-CPU LRU, per-cpu map
+**!BPF_F_NO_COMMON_LRU** Global LRU, global map Global LRU, per-cpu map
+======================== ========================= ================================
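+
+For illustration, requesting per-CPU LRU lists for a single global map could
+look like the following libbpf sketch (map name, key/value sizes and entry
+count are made up for the example)::
+
+    #include <bpf/bpf.h>    /* libbpf map creation API */
+
+    int create_lru_map(void)
+    {
+            /* Per-CPU LRU lists backing one global LRU hash map. */
+            LIBBPF_OPTS(bpf_map_create_opts, opts,
+                        .map_flags = BPF_F_NO_COMMON_LRU);
+
+            return bpf_map_create(BPF_MAP_TYPE_LRU_HASH, "example_lru",
+                                  sizeof(__u32), sizeof(__u64), 1024, &opts);
+    }
+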
Usage
=====
@@ -206,3 +216,44 @@ Userspace walking the map elements from the map declared above:
cur_key = &next_key;
}
}
+
+Internals
+=========
+
+This section of the document is targeted at Linux developers and describes
+aspects of the map implementations that are not considered stable ABI. The
+following details are subject to change in future versions of the kernel.
+
+``BPF_MAP_TYPE_LRU_HASH`` and variants
+--------------------------------------
+
+Updating elements in LRU maps may trigger eviction behaviour when the capacity
+of the map is reached. There are various steps that the update algorithm
+attempts in order to enforce the LRU property, each having an increasing impact
+on the other CPUs involved in the following operation attempts:
+
+- Attempt to use CPU-local state to batch operations
+- Attempt to fetch free nodes from global lists
+- Attempt to pull any node from a global list and remove it from the hashmap
+- Attempt to pull any node from any CPU's list and remove it from the hashmap
+
+This algorithm is described visually in the following diagram. See the
+description in commit 3a08c2fd7634 ("bpf: LRU List") for a full explanation of
+the corresponding operations:
+
+.. kernel-figure:: map_lru_hash_update.dot
+ :alt: Diagram outlining the LRU eviction steps taken during map update.
+
+ LRU hash eviction during map update for ``BPF_MAP_TYPE_LRU_HASH`` and
+ variants. See the dot file source for kernel function name code references.
+
+Map updates start from the oval in the top right "begin ``bpf_map_update()``"
+and progress through the graph towards the bottom where the result may be
+either a successful update or a failure with various error codes. The key in
+the top right provides indicators for which locks may be involved in specific
+operations. This is intended as a visual hint for reasoning about how map
+contention may impact update operations, though the map type and flags may
+impact the actual contention on those locks, based on the logic described in
+the table above. For instance, if the map is created with type
+``BPF_MAP_TYPE_LRU_PERCPU_HASH`` and flags ``BPF_F_NO_COMMON_LRU`` then all map
+properties would be per-cpu.
diff --git a/Documentation/bpf/map_lru_hash_update.dot b/Documentation/bpf/map_lru_hash_update.dot
new file mode 100644
index 000000000000..a0fee349d29c
--- /dev/null
+++ b/Documentation/bpf/map_lru_hash_update.dot
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2022-2023 Isovalent, Inc.
+digraph {
+ node [colorscheme=accent4,style=filled] # Apply colorscheme to all nodes
+ graph [splines=ortho, nodesep=1]
+
+ subgraph cluster_key {
+ label = "Key\n(locks held during operation)";
+ rankdir = TB;
+
+ remote_lock [shape=rectangle,fillcolor=4,label="remote CPU LRU lock"]
+ hash_lock [shape=rectangle,fillcolor=3,label="hashtab lock"]
+ lru_lock [shape=rectangle,fillcolor=2,label="LRU lock"]
+ local_lock [shape=rectangle,fillcolor=1,label="local CPU LRU lock"]
+ no_lock [shape=rectangle,label="no locks held"]
+ }
+
+ begin [shape=oval,label="begin\nbpf_map_update()"]
+
+ // Nodes below with an 'fn_' prefix are roughly labeled by the C function
+ // names that initiate the corresponding logic in kernel/bpf/bpf_lru_list.c.
+ // Number suffixes and errno suffixes handle subsections of the corresponding
+ // logic in the function as of the writing of this dot.
+
+ // cf. __local_list_pop_free() / bpf_percpu_lru_pop_free()
+ local_freelist_check [shape=diamond,fillcolor=1,
+ label="Local freelist\nnode available?"];
+ use_local_node [shape=rectangle,
+ label="Use node owned\nby this CPU"]
+
+ // cf. bpf_lru_pop_free()
+ common_lru_check [shape=diamond,
+ label="Map created with\ncommon LRU?\n(!BPF_F_NO_COMMON_LRU)"];
+
+ fn_bpf_lru_list_pop_free_to_local [shape=rectangle,fillcolor=2,
+ label="Flush local pending,
+ Rotate Global list, move
+ LOCAL_FREE_TARGET
+ from global -> local"]
+ // Also corresponds to:
+ // fn__local_list_flush()
+ // fn_bpf_lru_list_rotate()
+ fn___bpf_lru_node_move_to_free[shape=diamond,fillcolor=2,
+ label="Able to free\nLOCAL_FREE_TARGET\nnodes?"]
+
+ fn___bpf_lru_list_shrink_inactive [shape=rectangle,fillcolor=3,
+ label="Shrink inactive list
+ up to remaining
+ LOCAL_FREE_TARGET
+ (global LRU -> local)"]
+ fn___bpf_lru_list_shrink [shape=diamond,fillcolor=2,
+ label="> 0 entries in\nlocal free list?"]
+ fn___bpf_lru_list_shrink2 [shape=rectangle,fillcolor=2,
+ label="Steal one node from
+ inactive, or if empty,
+ from active global list"]
+ fn___bpf_lru_list_shrink3 [shape=rectangle,fillcolor=3,
+ label="Try to remove\nnode from hashtab"]
+
+ local_freelist_check2 [shape=diamond,label="Htab removal\nsuccessful?"]
+ common_lru_check2 [shape=diamond,
+ label="Map created with\ncommon LRU?\n(!BPF_F_NO_COMMON_LRU)"];
+
+ subgraph cluster_remote_lock {
+ label = "Iterate through CPUs\n(start from current)";
+ style = dashed;
+ rankdir=LR;
+
+ local_freelist_check5 [shape=diamond,fillcolor=4,
+ label="Steal a node from\nper-cpu freelist?"]
+ local_freelist_check6 [shape=rectangle,fillcolor=4,
+ label="Steal a node from
+ (1) Unreferenced pending, or
+ (2) Any pending node"]
+ local_freelist_check7 [shape=rectangle,fillcolor=3,
+ label="Try to remove\nnode from hashtab"]
+ fn_htab_lru_map_update_elem [shape=diamond,
+ label="Stole node\nfrom remote\nCPU?"]
+ fn_htab_lru_map_update_elem2 [shape=diamond,label="Iterated\nall CPUs?"]
+ // Also corresponds to:
+ // use_local_node()
+ // fn__local_list_pop_pending()
+ }
+
+ fn_bpf_lru_list_pop_free_to_local2 [shape=rectangle,
+ label="Use node that was\nnot recently referenced"]
+ local_freelist_check4 [shape=rectangle,
+ label="Use node that was\nactively referenced\nin global list"]
+ fn_htab_lru_map_update_elem_ENOMEM [shape=oval,label="return -ENOMEM"]
+ fn_htab_lru_map_update_elem3 [shape=rectangle,
+ label="Use node that was\nactively referenced\nin (another?) CPU's cache"]
+ fn_htab_lru_map_update_elem4 [shape=rectangle,fillcolor=3,
+ label="Update hashmap\nwith new element"]
+ fn_htab_lru_map_update_elem5 [shape=oval,label="return 0"]
+ fn_htab_lru_map_update_elem_EBUSY [shape=oval,label="return -EBUSY"]
+ fn_htab_lru_map_update_elem_EEXIST [shape=oval,label="return -EEXIST"]
+ fn_htab_lru_map_update_elem_ENOENT [shape=oval,label="return -ENOENT"]
+
+ begin -> local_freelist_check
+ local_freelist_check -> use_local_node [xlabel="Y"]
+ local_freelist_check -> common_lru_check [xlabel="N"]
+ common_lru_check -> fn_bpf_lru_list_pop_free_to_local [xlabel="Y"]
+ common_lru_check -> fn___bpf_lru_list_shrink_inactive [xlabel="N"]
+ fn_bpf_lru_list_pop_free_to_local -> fn___bpf_lru_node_move_to_free
+ fn___bpf_lru_node_move_to_free ->
+ fn_bpf_lru_list_pop_free_to_local2 [xlabel="Y"]
+ fn___bpf_lru_node_move_to_free ->
+ fn___bpf_lru_list_shrink_inactive [xlabel="N"]
+ fn___bpf_lru_list_shrink_inactive -> fn___bpf_lru_list_shrink
+ fn___bpf_lru_list_shrink -> fn_bpf_lru_list_pop_free_to_local2 [xlabel = "Y"]
+ fn___bpf_lru_list_shrink -> fn___bpf_lru_list_shrink2 [xlabel="N"]
+ fn___bpf_lru_list_shrink2 -> fn___bpf_lru_list_shrink3
+ fn___bpf_lru_list_shrink3 -> local_freelist_check2
+ local_freelist_check2 -> local_freelist_check4 [xlabel = "Y"]
+ local_freelist_check2 -> common_lru_check2 [xlabel = "N"]
+ common_lru_check2 -> local_freelist_check5 [xlabel = "Y"]
+ common_lru_check2 -> fn_htab_lru_map_update_elem_ENOMEM [xlabel = "N"]
+ local_freelist_check5 -> fn_htab_lru_map_update_elem [xlabel = "Y"]
+ local_freelist_check5 -> local_freelist_check6 [xlabel = "N"]
+ local_freelist_check6 -> local_freelist_check7
+ local_freelist_check7 -> fn_htab_lru_map_update_elem
+
+ fn_htab_lru_map_update_elem -> fn_htab_lru_map_update_elem3 [xlabel = "Y"]
+ fn_htab_lru_map_update_elem -> fn_htab_lru_map_update_elem2 [xlabel = "N"]
+ fn_htab_lru_map_update_elem2 ->
+ fn_htab_lru_map_update_elem_ENOMEM [xlabel = "Y"]
+ fn_htab_lru_map_update_elem2 -> local_freelist_check5 [xlabel = "N"]
+ fn_htab_lru_map_update_elem3 -> fn_htab_lru_map_update_elem4
+
+ use_local_node -> fn_htab_lru_map_update_elem4
+ fn_bpf_lru_list_pop_free_to_local2 -> fn_htab_lru_map_update_elem4
+ local_freelist_check4 -> fn_htab_lru_map_update_elem4
+
+ fn_htab_lru_map_update_elem4 -> fn_htab_lru_map_update_elem5 [headlabel="Success"]
+ fn_htab_lru_map_update_elem4 ->
+ fn_htab_lru_map_update_elem_EBUSY [xlabel="Hashtab lock failed"]
+ fn_htab_lru_map_update_elem4 ->
+ fn_htab_lru_map_update_elem_EEXIST [xlabel="BPF_EXIST set and\nkey already exists"]
+ fn_htab_lru_map_update_elem4 ->
+ fn_htab_lru_map_update_elem_ENOENT [headlabel="BPF_NOEXIST set\nand no such entry"]
+
+ // Create invisible pad nodes to line up various nodes
+ pad0 [style=invis]
+ pad1 [style=invis]
+ pad2 [style=invis]
+ pad3 [style=invis]
+ pad4 [style=invis]
+
+ // Line up the key with the top of the graph
+ no_lock -> local_lock [style=invis]
+ local_lock -> lru_lock [style=invis]
+ lru_lock -> hash_lock [style=invis]
+ hash_lock -> remote_lock [style=invis]
+ remote_lock -> local_freelist_check5 [style=invis]
+ remote_lock -> fn___bpf_lru_list_shrink [style=invis]
+
+ // Line up return code nodes at the bottom of the graph
+ fn_htab_lru_map_update_elem -> pad0 [style=invis]
+ pad0 -> pad1 [style=invis]
+ pad1 -> pad2 [style=invis]
+ //pad2-> fn_htab_lru_map_update_elem_ENOMEM [style=invis]
+ fn_htab_lru_map_update_elem4 -> pad3 [style=invis]
+ pad3 -> fn_htab_lru_map_update_elem5 [style=invis]
+ pad3 -> fn_htab_lru_map_update_elem_EBUSY [style=invis]
+ pad3 -> fn_htab_lru_map_update_elem_EEXIST [style=invis]
+ pad3 -> fn_htab_lru_map_update_elem_ENOENT [style=invis]
+
+ // Reduce diagram width by forcing some nodes to appear above others
+ local_freelist_check4 -> fn_htab_lru_map_update_elem3 [style=invis]
+ common_lru_check2 -> pad4 [style=invis]
+ pad4 -> local_freelist_check5 [style=invis]
+}
diff --git a/Documentation/devicetree/bindings/arm/rockchip.yaml b/Documentation/devicetree/bindings/arm/rockchip.yaml
index ec141c937b8b..b1d271778179 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.yaml
+++ b/Documentation/devicetree/bindings/arm/rockchip.yaml
@@ -562,6 +562,13 @@ properties:
- const: leez,p710
- const: rockchip,rk3399
+ - description: Lunzn FastRhino R66S / R68S
+ items:
+ - enum:
+ - lunzn,fastrhino-r66s
+ - lunzn,fastrhino-r68s
+ - const: rockchip,rk3568
+
- description: mqmaker MiQi
items:
- const: mqmaker,miqi
diff --git a/Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.yaml b/Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.yaml
index 6a9c96f0352a..2c8cf6aab19a 100644
--- a/Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.yaml
+++ b/Documentation/devicetree/bindings/extcon/qcom,pm8941-misc.yaml
@@ -27,10 +27,14 @@ properties:
interrupt-names:
minItems: 1
- items:
- - const: usb_id
- - const: usb_vbus
-
+ anyOf:
+ - items:
+ - const: usb_id
+ - const: usb_vbus
+ - items:
+ - const: usb_id
+ - items:
+ - const: usb_vbus
required:
- compatible
- reg
@@ -49,7 +53,7 @@ examples:
interrupt-controller;
#interrupt-cells = <4>;
- usb_id: misc@900 {
+ usb_id: usb-detect@900 {
compatible = "qcom,pm8941-misc";
reg = <0x900>;
interrupts = <0x0 0x9 0 IRQ_TYPE_EDGE_BOTH>;
diff --git a/Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml b/Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml
index 4fb05eb84e2a..164331eb6275 100644
--- a/Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml
+++ b/Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Lattice Slave SPI sysCONFIG FPGA manager
maintainers:
- - Ivan Bornyakov <i.bornyakov@metrotek.ru>
+ - Vladimir Georgiev <v.georgiev@metrotek.ru>
description: |
Lattice sysCONFIG port, which is used for FPGA configuration, among others,
diff --git a/Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml b/Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml
index 527532f039ce..a157eecfb5fc 100644
--- a/Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml
+++ b/Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Microchip Polarfire FPGA manager.
maintainers:
- - Ivan Bornyakov <i.bornyakov@metrotek.ru>
+ - Vladimir Georgiev <v.georgiev@metrotek.ru>
description:
Device Tree Bindings for Microchip Polarfire FPGA Manager using slave SPI to
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
index 318122d95eb5..0fae1ef013be 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.yaml
@@ -33,6 +33,7 @@ properties:
- rockchip,rk3228-mali
- samsung,exynos4210-mali
- stericsson,db8500-mali
+ - xlnx,zynqmp-mali
- const: arm,mali-400
- items:
- enum:
diff --git a/Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml b/Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml
index 63369ba388e4..0a192ca192c5 100644
--- a/Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml
@@ -39,6 +39,12 @@ properties:
power-domains:
maxItems: 1
+ vref-supply:
+ description: |
+ External ADC reference voltage supply on VREFH pad. If VERID[MVI] is
+ set, additional internal reference voltages are selectable. VREFH1 is
+ VREFH1 is always from VREFH pad.
+
"#io-channel-cells":
const: 1
@@ -72,6 +78,7 @@ examples:
assigned-clocks = <&clk IMX_SC_R_ADC_0>;
assigned-clock-rates = <24000000>;
power-domains = <&pd IMX_SC_R_ADC_0>;
+ vref-supply = <&reg_1v8>;
#io-channel-cells = <1>;
};
};
diff --git a/Documentation/devicetree/bindings/input/atmel,maxtouch.yaml b/Documentation/devicetree/bindings/input/atmel,maxtouch.yaml
index 3ec579d63570..c40799355ed7 100644
--- a/Documentation/devicetree/bindings/input/atmel,maxtouch.yaml
+++ b/Documentation/devicetree/bindings/input/atmel,maxtouch.yaml
@@ -14,6 +14,9 @@ description: |
Atmel maXTouch touchscreen or touchpads such as the mXT244
and similar devices.
+allOf:
+ - $ref: input.yaml#
+
properties:
compatible:
const: atmel,maxtouch
@@ -60,6 +63,10 @@ properties:
or experiment to determine which bit corresponds to which input. Use
KEY_RESERVED for unused padding values.
+ linux,keycodes:
+ minItems: 1
+ maxItems: 8
+
atmel,wakeup-method:
$ref: /schemas/types.yaml#/definitions/uint32
description: |
diff --git a/Documentation/devicetree/bindings/input/pwm-vibrator.yaml b/Documentation/devicetree/bindings/input/pwm-vibrator.yaml
index d32716c604fe..6398534b43c3 100644
--- a/Documentation/devicetree/bindings/input/pwm-vibrator.yaml
+++ b/Documentation/devicetree/bindings/input/pwm-vibrator.yaml
@@ -32,6 +32,8 @@ properties:
minItems: 1
maxItems: 2
+ enable-gpios: true
+
vcc-supply: true
direction-duty-cycle-ns:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/cypress,tt21000.yaml b/Documentation/devicetree/bindings/input/touchscreen/cypress,tt21000.yaml
index 1959ec394768..4080422a9eb5 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/cypress,tt21000.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/cypress,tt21000.yaml
@@ -40,6 +40,8 @@ properties:
linux,keycodes:
description: EV_ABS specific event code generated by the axis.
+ wakeup-source: true
+
patternProperties:
"^button@[0-9]+$":
type: object
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov2685.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov2685.yaml
index 8b389314c352..e2ffe0a9c26b 100644
--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov2685.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov2685.yaml
@@ -49,6 +49,7 @@ properties:
properties:
data-lanes:
+ minItems: 1
maxItems: 2
required:
diff --git a/Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.txt b/Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.txt
deleted file mode 100644
index 3dc8b3d2ffbb..000000000000
--- a/Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-BROADCOM KONA USB2 PHY
-
-Required properties:
- - compatible: brcm,kona-usb2-phy
- - reg: offset and length of the PHY registers
- - #phy-cells: must be 0
-Refer to phy/phy-bindings.txt for the generic PHY binding properties
-
-Example:
-
- usbphy: usb-phy@3f130000 {
- compatible = "brcm,kona-usb2-phy";
- reg = <0x3f130000 0x28>;
- #phy-cells = <0>;
- };
diff --git a/Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.yaml
new file mode 100644
index 000000000000..d7faeb81f7a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/brcm,kona-usb2-phy.yaml
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/brcm,kona-usb2-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Kona family USB 2.0 PHY
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+properties:
+ compatible:
+ const: brcm,kona-usb2-phy
+
+ reg:
+ maxItems: 1
+
+ '#phy-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ usb-phy@3f130000 {
+ compatible = "brcm,kona-usb2-phy";
+ reg = <0x3f130000 0x28>;
+ #phy-cells = <0>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml
index c4f8e6ffa5c3..6566353f1a02 100644
--- a/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml
@@ -43,6 +43,9 @@ properties:
"#phy-cells":
const: 0
+ power-domains:
+ maxItems: 1
+
vdda-phy-supply: true
vdda-pll-supply: true
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
index 94c0fab065a8..a1897a7606df 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
@@ -78,9 +78,9 @@ allOf:
then:
properties:
clocks:
- maxItems: 3
+ minItems: 3
clock-names:
- maxItems: 3
+ minItems: 3
else:
properties:
clocks:
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
index 16fce1038285..c61cea4835bb 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb3-uni-phy.yaml
@@ -16,6 +16,7 @@ description:
properties:
compatible:
enum:
+ - qcom,sa8775p-qmp-usb3-uni-phy
- qcom,sc8280xp-qmp-usb3-uni-phy
reg:
diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
index a26524b7e7b7..0f200e3f97a9 100644
--- a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
@@ -20,6 +20,7 @@ properties:
- qcom,usb-snps-femto-v2-phy
- items:
- enum:
+ - qcom,sa8775p-usb-hs-phy
- qcom,sc8280xp-usb-hs-phy
- const: qcom,usb-snps-hs-5nm-phy
- items:
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
index eaadd5a9a445..8aaf50181cef 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
@@ -49,6 +49,7 @@ properties:
- qcom,pm8921-gpio
- qcom,pm8941-gpio
- qcom,pm8950-gpio
+ - qcom,pm8953-gpio
- qcom,pm8994-gpio
- qcom,pm8998-gpio
- qcom,pma8084-gpio
@@ -175,6 +176,7 @@ allOf:
- qcom,pm8350b-gpio
- qcom,pm8550ve-gpio
- qcom,pm8950-gpio
+ - qcom,pm8953-gpio
- qcom,pmi632-gpio
then:
properties:
@@ -434,6 +436,7 @@ $defs:
- gpio1-gpio44 for pm8921
- gpio1-gpio36 for pm8941
- gpio1-gpio8 for pm8950 (hole on gpio3)
+ - gpio1-gpio8 for pm8953 (hole on gpio3 and gpio6)
- gpio1-gpio22 for pm8994
- gpio1-gpio26 for pm8998
- gpio1-gpio22 for pma8084
diff --git a/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.txt b/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.txt
deleted file mode 100644
index 752d6126d5da..000000000000
--- a/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-NVMEM reboot mode driver
-
-This driver gets reboot mode magic value from reboot-mode driver
-and stores it in a NVMEM cell named "reboot-mode". Then the bootloader
-can read it and take different action according to the magic
-value stored.
-
-Required properties:
-- compatible: should be "nvmem-reboot-mode".
-- nvmem-cells: A phandle to the reboot mode provided by a nvmem device.
-- nvmem-cell-names: Should be "reboot-mode".
-
-The rest of the properties should follow the generic reboot-mode description
-found in reboot-mode.txt
-
-Example:
- reboot-mode {
- compatible = "nvmem-reboot-mode";
- nvmem-cells = <&reboot_mode>;
- nvmem-cell-names = "reboot-mode";
-
- mode-normal = <0xAAAA5501>;
- mode-bootloader = <0xBBBB5500>;
- mode-recovery = <0xCCCC5502>;
- mode-test = <0xDDDD5503>;
- };
diff --git a/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml b/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml
new file mode 100644
index 000000000000..14a262bcbf7c
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/reset/nvmem-reboot-mode.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/reset/nvmem-reboot-mode.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic NVMEM reboot mode
+
+maintainers:
+ - Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
+
+description:
+ This driver gets the reboot mode magic value from the reboot-mode driver
+ and stores it in the NVMEM cell named "reboot-mode". The bootloader can
+ then read it and take different action according to the value.
+
+properties:
+ compatible:
+ const: nvmem-reboot-mode
+
+ nvmem-cells:
+ description:
+ A phandle pointing to the nvmem-cells node where the vendor-specific
+ magic value representing the reboot mode is stored.
+ maxItems: 1
+
+ nvmem-cell-names:
+ items:
+ - const: reboot-mode
+
+patternProperties:
+ "^mode-.+":
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Vendor-specific mode value written to the mode register
+
+required:
+ - compatible
+ - nvmem-cells
+ - nvmem-cell-names
+
+additionalProperties: false
+
+examples:
+ - |
+ reboot-mode {
+ compatible = "nvmem-reboot-mode";
+ nvmem-cells = <&reboot_reason>;
+ nvmem-cell-names = "reboot-mode";
+ mode-recovery = <0x01>;
+ mode-bootloader = <0x02>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml b/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
index d96170eecbd2..0b1eca734d3b 100644
--- a/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
+++ b/Documentation/devicetree/bindings/power/reset/qcom,pon.yaml
@@ -56,7 +56,6 @@ required:
unevaluatedProperties: false
allOf:
- - $ref: reboot-mode.yaml#
- if:
properties:
compatible:
@@ -66,6 +65,9 @@ allOf:
- qcom,pms405-pon
- qcom,pm8998-pon
then:
+ allOf:
+ - $ref: reboot-mode.yaml#
+
properties:
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/power/supply/bq256xx.yaml b/Documentation/devicetree/bindings/power/supply/bq256xx.yaml
index 82f382a7ffb3..4fe9c3705265 100644
--- a/Documentation/devicetree/bindings/power/supply/bq256xx.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq256xx.yaml
@@ -68,11 +68,29 @@ properties:
Interrupt sends an active low, 256 μs pulse to host to report the charger
device status and faults.
+ ti,no-thermistor:
+ type: boolean
+ description: Indicates that no thermistor is connected to the TS pin
+
required:
- compatible
- reg
- monitored-battery
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ti,bq25600
+ - ti,bq25601
+ - ti,bq25600d
+ - ti,bq25601d
+ then:
+ properties:
+ ti,no-thermistor: false
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/reset/oxnas,reset.txt b/Documentation/devicetree/bindings/reset/oxnas,reset.txt
deleted file mode 100644
index d27ccb5d04fc..000000000000
--- a/Documentation/devicetree/bindings/reset/oxnas,reset.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-Oxford Semiconductor OXNAS SoC Family RESET Controller
-================================================
-
-Please also refer to reset.txt in this directory for common reset
-controller binding usage.
-
-Required properties:
-- compatible: For OX810SE, should be "oxsemi,ox810se-reset"
- For OX820, should be "oxsemi,ox820-reset"
-- #reset-cells: 1, see below
-
-Parent node should have the following properties :
-- compatible: For OX810SE, should be :
- "oxsemi,ox810se-sys-ctrl", "syscon", "simple-mfd"
- For OX820, should be :
- "oxsemi,ox820-sys-ctrl", "syscon", "simple-mfd"
-
-Reset indices are in dt-bindings include files :
-- For OX810SE: include/dt-bindings/reset/oxsemi,ox810se.h
-- For OX820: include/dt-bindings/reset/oxsemi,ox820.h
-
-example:
-
-sys: sys-ctrl@000000 {
- compatible = "oxsemi,ox810se-sys-ctrl", "syscon", "simple-mfd";
- reg = <0x000000 0x100000>;
-
- reset: reset-controller {
- compatible = "oxsemi,ox810se-reset";
- #reset-cells = <1>;
- };
-};
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
index 65a2d5a4f28d..e8fca419cc8e 100644
--- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
@@ -52,6 +52,7 @@ properties:
- rockchip,rk3399-pmugrf
- rockchip,rk3568-grf
- rockchip,rk3568-pmugrf
+ - rockchip,rk3588-usb2phy-grf
- rockchip,rv1108-grf
- rockchip,rv1108-pmugrf
- rockchip,rv1126-grf
@@ -199,6 +200,7 @@ allOf:
- rockchip,rk3308-usb2phy-grf
- rockchip,rk3328-usb2phy-grf
- rockchip,rk3399-grf
+ - rockchip,rk3588-usb2phy-grf
- rockchip,rv1108-grf
then:
diff --git a/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml b/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml
index e4dba825ab11..fb44b89a754e 100644
--- a/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml
+++ b/Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml
@@ -21,6 +21,7 @@ properties:
- qcom,soundwire-v1.5.1
- qcom,soundwire-v1.6.0
- qcom,soundwire-v1.7.0
+ - qcom,soundwire-v2.0.0
reg:
maxItems: 1
@@ -80,18 +81,29 @@ properties:
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
qcom,ports-sinterval-low:
$ref: /schemas/types.yaml#/definitions/uint8-array
description:
- Sample interval low of each data port.
+ Sample interval (only lowest byte) of each data port.
Out ports followed by In ports. Used for Sample Interval calculation.
Value of 0xff indicates that this option is not implemented
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
+
+ qcom,ports-sinterval:
+ $ref: /schemas/types.yaml#/definitions/uint16-array
+ description:
+ Sample interval of each data port.
+ Out ports followed by In ports. Used for Sample Interval calculation.
+ Value of 0xffff indicates that this option is not implemented
+ or applicable for the respective data port.
+ More info in MIPI Alliance SoundWire 1.0 Specifications.
+ minItems: 3
+ maxItems: 16
qcom,ports-offset1:
$ref: /schemas/types.yaml#/definitions/uint8-array
@@ -102,7 +114,7 @@ properties:
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
qcom,ports-offset2:
$ref: /schemas/types.yaml#/definitions/uint8-array
@@ -113,7 +125,7 @@ properties:
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
qcom,ports-lane-control:
$ref: /schemas/types.yaml#/definitions/uint8-array
@@ -124,7 +136,7 @@ properties:
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
qcom,ports-block-pack-mode:
$ref: /schemas/types.yaml#/definitions/uint8-array
@@ -137,7 +149,7 @@ properties:
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
items:
oneOf:
- minimum: 0
@@ -154,7 +166,7 @@ properties:
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
items:
oneOf:
- minimum: 0
@@ -171,7 +183,7 @@ properties:
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
items:
oneOf:
- minimum: 0
@@ -187,7 +199,7 @@ properties:
or applicable for the respective data port.
More info in MIPI Alliance SoundWire 1.0 Specifications.
minItems: 3
- maxItems: 8
+ maxItems: 16
items:
oneOf:
- minimum: 0
@@ -219,10 +231,15 @@ required:
- '#size-cells'
- qcom,dout-ports
- qcom,din-ports
- - qcom,ports-sinterval-low
- qcom,ports-offset1
- qcom,ports-offset2
+oneOf:
+ - required:
+ - qcom,ports-sinterval-low
+ - required:
+ - qcom,ports-sinterval
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml b/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml
index ee8f7ea907b0..1696ac46a660 100644
--- a/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-qcom-qspi.yaml
@@ -29,6 +29,9 @@ properties:
reg:
maxItems: 1
+ iommus:
+ maxItems: 1
+
interrupts:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml
index 20f77246d365..226d8b493b57 100644
--- a/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-zynqmp-qspi.yaml
@@ -32,6 +32,12 @@ properties:
clocks:
maxItems: 2
+ iommus:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
index d84281926f10..4a36e2b6c8fb 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -23,6 +23,7 @@ properties:
- qcom,msm8998-dwc3
- qcom,qcm2290-dwc3
- qcom,qcs404-dwc3
+ - qcom,sa8775p-dwc3
- qcom,sc7180-dwc3
- qcom,sc7280-dwc3
- qcom,sc8280xp-dwc3
@@ -180,6 +181,7 @@ allOf:
- qcom,msm8953-dwc3
- qcom,msm8996-dwc3
- qcom,msm8998-dwc3
+ - qcom,sa8775p-dwc3
- qcom,sc7180-dwc3
- qcom,sc7280-dwc3
- qcom,sdm670-dwc3
@@ -455,6 +457,25 @@ allOf:
- const: dm_hs_phy_irq
- const: ss_phy_irq
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sa8775p-dwc3
+ then:
+ properties:
+ interrupts:
+ minItems: 3
+ maxItems: 4
+ interrupt-names:
+ minItems: 3
+ items:
+ - const: pwr_event
+ - const: dp_hs_phy_irq
+ - const: dm_hs_phy_irq
+ - const: ss_phy_irq
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 82d39ab0231b..c3d426509e7e 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -781,6 +781,8 @@ patternProperties:
description: Nanjing Loongmasses Ltd.
"^lsi,.*":
description: LSI Corp. (LSI Logic)
+ "^lunzn,.*":
+ description: Shenzhen Lunzn Technology Co., Ltd.
"^lwn,.*":
description: Liebherr-Werk Nenzing GmbH
"^lxa,.*":
diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index c57745375edb..9359978a5af2 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -351,6 +351,22 @@ age_extent_cache Enable an age extent cache based on rb-tree. It records
data block update frequency of the extent per inode, in
order to provide better temperature hints for data block
allocation.
+errors=%s                Specify f2fs behavior on critical errors. This supports
+                         the modes "panic", "continue" and "remount-ro", which,
+                         respectively, trigger a panic immediately, continue
+                         without doing anything, and remount the partition in
+                         read-only mode. By default it uses the "continue" mode.
+                         ====================== =============== =============== ========
+                         mode                   continue        remount-ro      panic
+                         ====================== =============== =============== ========
+                         access ops             normal          normal          N/A
+                         syscall errors         -EIO            -EROFS          N/A
+                         mount option           rw              ro              N/A
+                         pending dir write      keep            keep            N/A
+                         pending non-dir write  drop            keep            N/A
+                         pending node write     drop            keep            N/A
+                         pending meta write     keep            keep            N/A
+                         ====================== =============== =============== ========
======================== ============================================================
Debugfs Entries
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index 7897a7dafcbc..6ccb57089a06 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -566,6 +566,7 @@ encoded manner. The codes are the following:
mt arm64 MTE allocation tags are enabled
um userfaultfd missing tracking
uw userfaultfd wr-protect tracking
+ ss shadow stack page
== =======================================
Note that there is no guarantee that every flag and associated mnemonic will
diff --git a/Documentation/hwmon/asus_ec_sensors.rst b/Documentation/hwmon/asus_ec_sensors.rst
index c92c1d3839e4..7e3cd5b6686f 100644
--- a/Documentation/hwmon/asus_ec_sensors.rst
+++ b/Documentation/hwmon/asus_ec_sensors.rst
@@ -14,6 +14,7 @@ Supported boards:
* ROG CROSSHAIR VIII FORMULA
* ROG CROSSHAIR VIII HERO
* ROG CROSSHAIR VIII IMPACT
+ * ROG CROSSHAIR X670E HERO
* ROG MAXIMUS XI HERO
* ROG MAXIMUS XI HERO (WI-FI)
* ROG STRIX B550-E GAMING
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
index fa1208c62855..03b30a94a9e6 100644
--- a/Documentation/hwmon/index.rst
+++ b/Documentation/hwmon/index.rst
@@ -9,7 +9,6 @@ Hardware Monitoring
hwmon-kernel-api
pmbus-core
- inspur-ipsps1
submitting-patches
sysfs-interface
userspace-tools
@@ -85,6 +84,7 @@ Hardware Monitoring Kernel Drivers
ina2xx
ina238
ina3221
+ inspur-ipsps1
intel-m10-bmc-hwmon
ir35221
ir38064
diff --git a/Documentation/hwmon/oxp-sensors.rst b/Documentation/hwmon/oxp-sensors.rst
index 566a8d5bde08..4ab442301415 100644
--- a/Documentation/hwmon/oxp-sensors.rst
+++ b/Documentation/hwmon/oxp-sensors.rst
@@ -25,8 +25,10 @@ Supported devices
Currently the driver supports the following handhelds:
- AOK ZOE A1
+ - Aya Neo 2
- Aya Neo AIR
- Aya Neo AIR Pro
+ - Aya Neo Geek
- OneXPlayer AMD
- OneXPlayer mini AMD
- OneXPlayer mini AMD PRO
diff --git a/Documentation/mm/arch_pgtable_helpers.rst b/Documentation/mm/arch_pgtable_helpers.rst
index af3891f895b0..c19a93c44e4c 100644
--- a/Documentation/mm/arch_pgtable_helpers.rst
+++ b/Documentation/mm/arch_pgtable_helpers.rst
@@ -46,7 +46,8 @@ PTE Page Table Helpers
+---------------------------+--------------------------------------------------+
| pte_mkclean | Creates a clean PTE |
+---------------------------+--------------------------------------------------+
-| pte_mkwrite | Creates a writable PTE |
+| pte_mkwrite | Creates a writable PTE of the type specified by |
+| | the VMA. |
+---------------------------+--------------------------------------------------+
| pte_wrprotect | Creates a write protected PTE |
+---------------------------+--------------------------------------------------+
@@ -118,7 +119,8 @@ PMD Page Table Helpers
+---------------------------+--------------------------------------------------+
| pmd_mkclean | Creates a clean PMD |
+---------------------------+--------------------------------------------------+
-| pmd_mkwrite | Creates a writable PMD |
+| pmd_mkwrite | Creates a writable PMD of the type specified by |
+| | the VMA. |
+---------------------------+--------------------------------------------------+
| pmd_wrprotect | Creates a write protected PMD |
+---------------------------+--------------------------------------------------+
@@ -222,7 +224,8 @@ HugeTLB Page Table Helpers
+---------------------------+--------------------------------------------------+
| huge_pte_mkdirty | Creates a dirty HugeTLB |
+---------------------------+--------------------------------------------------+
-| huge_pte_mkwrite | Creates a writable HugeTLB |
+| huge_pte_mkwrite | Creates a writable HugeTLB of the type specified |
+| | by the VMA. |
+---------------------------+--------------------------------------------------+
| huge_pte_wrprotect | Creates a write protected HugeTLB |
+---------------------------+--------------------------------------------------+
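+
+As a hypothetical caller sketch, the VMA is simply threaded through to the
+helper, which can then decide whether to create a regular writable entry or
+a shadow stack one::
+
+    pte = pte_mkwrite(pte, vma);    /* formerly pte_mkwrite(pte) */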
diff --git a/MAINTAINERS b/MAINTAINERS
index 7e0b87d5aa2e..084cf8c7fd52 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1961,7 +1961,7 @@ F: Documentation/devicetree/bindings/nvmem/apple,efuses.yaml
F: Documentation/devicetree/bindings/pci/apple,pcie.yaml
F: Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml
F: Documentation/devicetree/bindings/power/apple*
-F: Documentation/devicetree/bindings/pwm/pwm-apple.yaml
+F: Documentation/devicetree/bindings/pwm/apple,s5l-fpwm.yaml
F: Documentation/devicetree/bindings/watchdog/apple,wdt.yaml
F: arch/arm64/boot/dts/apple/
F: drivers/bluetooth/hci_bcm4377.c
@@ -4513,6 +4513,13 @@ S: Supported
F: Documentation/filesystems/caching/cachefiles.rst
F: fs/cachefiles/
+CACHESTAT: PAGE CACHE STATS FOR A FILE
+M: Nhat Pham <nphamcs@gmail.com>
+M: Johannes Weiner <hannes@cmpxchg.org>
+L: linux-mm@kvack.org
+S: Maintained
+F: tools/testing/selftests/cachestat/test_cachestat.c
+
CADENCE MIPI-CSI2 BRIDGES
M: Maxime Ripard <mripard@kernel.org>
L: linux-media@vger.kernel.org
@@ -8100,7 +8107,7 @@ F: drivers/fpga/intel-m10-bmc-sec-update.c
MICROCHIP POLARFIRE FPGA DRIVERS
M: Conor Dooley <conor.dooley@microchip.com>
-R: Ivan Bornyakov <i.bornyakov@metrotek.ru>
+R: Vladimir Georgiev <v.georgiev@metrotek.ru>
L: linux-fpga@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml
@@ -9970,8 +9977,9 @@ M: Miquel Raynal <miquel.raynal@bootlin.com>
L: linux-wpan@vger.kernel.org
S: Maintained
W: https://linux-wpan.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git
+Q: https://patchwork.kernel.org/project/linux-wpan/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wpan/wpan.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wpan/wpan-next.git
F: Documentation/networking/ieee802154.rst
F: drivers/net/ieee802154/
F: include/linux/ieee802154.h
@@ -18154,6 +18162,8 @@ Q: https://patchwork.kernel.org/project/linux-riscv/list/
T: git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
F: Documentation/devicetree/bindings/riscv/
F: arch/riscv/boot/dts/
+X: arch/riscv/boot/dts/allwinner/
+X: arch/riscv/boot/dts/renesas/
RNBD BLOCK DRIVERS
M: Md. Haris Iqbal <haris.iqbal@ionos.com>
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index ba43cb841d19..fb5d207c2a89 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -256,9 +256,13 @@ extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;
extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
-extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; }
+extern inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ pte_val(pte) &= ~_PAGE_FOW;
+ return pte;
+}
/*
* The smp_rmb() in the following functions are required to order the load of
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
index 5001b796fb8d..223a96967188 100644
--- a/arch/arc/include/asm/hugepage.h
+++ b/arch/arc/include/asm/hugepage.h
@@ -21,7 +21,7 @@ static inline pmd_t pte_pmd(pte_t pte)
}
#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
-#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd, vma) pte_pmd(pte_mkwrite(pmd_pte(pmd), (vma)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
diff --git a/arch/arc/include/asm/pgtable-bits-arcv2.h b/arch/arc/include/asm/pgtable-bits-arcv2.h
index 6e9f8ca6d6a1..a5b8bc955015 100644
--- a/arch/arc/include/asm/pgtable-bits-arcv2.h
+++ b/arch/arc/include/asm/pgtable-bits-arcv2.h
@@ -87,7 +87,6 @@
PTE_BIT_FUNC(mknotpresent, &= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
-PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean, &= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
@@ -95,6 +94,12 @@ PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED));
PTE_BIT_FUNC(mkspecial, |= (_PAGE_SPECIAL));
PTE_BIT_FUNC(mkhuge, |= (_PAGE_HW_SZ));
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ pte_val(pte) |= (_PAGE_WRITE);
+ return pte;
+}
+
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
diff --git a/arch/arm/arm-soc-for-next-contents.txt b/arch/arm/arm-soc-for-next-contents.txt
new file mode 100644
index 000000000000..c98e20187ff1
--- /dev/null
+++ b/arch/arm/arm-soc-for-next-contents.txt
@@ -0,0 +1,217 @@
+arm/soc
+
+soc/dt
+ patch
+ ARM: dts: ixp4xx: use "okay" for status
+ arm64: dts: sprd: Add support for Unisoc's UMS512
+ dt/cleanup-64
+ https://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-dt tags/dt64-cleanup-6.4
+ dt/cleanup-32
+ https://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-dt tags/dt-cleanup-6.4
+ renesas/dt-bindinds
+ git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel tags/renesas-dt-bindings-for-v6.4-tag1
+ renesas/dt
+ git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel tags/renesas-dts-for-v6.4-tag1
+ omap/dt
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v6.4/dt-signed
+ omap/dt-overlays
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v6.4/dt-overlays-signed
+ at91/dt
+ git://git.kernel.org/pub/scm/linux/kernel/git/at91/linux tags/at91-dt-6.4
+ patch
+ ARM: dts: oxnas: remove obsolete device tree files
+ dt-bindings: arm: oxnas: remove obsolete bindings
+ amlogic/dt
+ https://git.kernel.org/pub/scm/linux/kernel/git/amlogic/linux tags/amlogic-arm-dt-for-v6.4
+ amlogic/dt64
+ https://git.kernel.org/pub/scm/linux/kernel/git/amlogic/linux tags/amlogic-arm64-dt-for-v6.4
+ samsung/dt64
+ https://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux tags/samsung-dt64-6.4
+ samsung/dt
+ https://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux tags/samsung-dt-6.4
+ apple/dt
+ https://github.com/AsahiLinux/linux tags/asahi-soc-dt-6.4
+ tegra/dt-bindings
+ git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux tags/tegra-for-6.4-dt-bindings
+ tegra/dt
+ git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux tags/tegra-for-6.4-arm-dt
+ tegra/dt64
+ git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux tags/tegra-for-6.4-arm64-dt
+ riscv/dt
+ https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux tags/riscv-dt-for-v6.4
+ stm32/dt
+ git://git.kernel.org/pub/scm/linux/kernel/git/atorgue/stm32 tags/stm32-dt-for-v6.4-1
+ imx/dt-bindings
+ git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux tags/imx-bindings-6.4
+ imx/dt
+ git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux tags/imx-dt-6.4
+ imx/dt64
+ git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux tags/imx-dt64-6.4
+ sunxi/dt
+ https://git.kernel.org/pub/scm/linux/kernel/git/sunxi/linux tags/sunxi-dt-for-6.4-1
+ rockchip/dt64
+ git://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip tags/v6.4-rockchip-dts64-1
+ rockchip/dt
+ git://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip tags/v6.4-rockchip-dts32-1
+ renesas/dt-2
+ git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel tags/renesas-dts-for-v6.4-tag2
+ k3/dt
+ git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux tags/ti-k3-dt-for-v6.4
+ qcom/dt
+ https://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux tags/qcom-dts-for-6.4
+ qcom/dt64
+ https://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux tags/qcom-arm64-for-6.4
+ patch
+ ARM: dts: nomadik: Replace deprecated spi-gpio properties
+ broadcom/dt
+ https://github.com/Broadcom/stblinux tags/arm-soc/for-6.4/devicetree
+ broadcom/dt64
+ https://github.com/Broadcom/stblinux tags/arm-soc/for-6.4/devicetree-arm64
+ aspeed/dt
+ git://git.kernel.org/pub/scm/linux/kernel/git/joel/bmc tags/aspeed-6.4-devicetree
+ qcom/dt-2
+ https://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux tags/qcom-dts-for-6.4-2
+ qcom/dt64-2
+ https://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux tags/qcom-arm64-for-6.4-2
+ mediatek/dt
+ https://git.kernel.org/pub/scm/linux/kernel/git/matthias.bgg/linux tags/v6.3-next-dts32
+ mediatek/dt64
+ https://git.kernel.org/pub/scm/linux/kernel/git/matthias.bgg/linux tags/v6.3-next-dts64
+ apple/dt-2
+ https://github.com/AsahiLinux/linux tags/asahi-soc-dt-6.4-v2
+ rockchip/dt64-2
+ git://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip tags/v6.4-rockchip-dts64-2
+ mvebu/dt
+ git://git.kernel.org/pub/scm/linux/kernel/git/gclement/mvebu tags/mvebu-dt-6.4-1
+ mvebu/dt64
+ git://git.kernel.org/pub/scm/linux/kernel/git/gclement/mvebu tags/mvebu-dt64-6.4-1
+
+soc/drivers
+ renesas/drivers
+ git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel tags/renesas-drivers-for-v6.4-tag1
+ omap/drivers
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v6.4/ti-sysc-signed
+ drivers/memory
+ https://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-mem-ctrl tags/memory-controller-drv-6.4
+ drivers/optee
+ https://git.linaro.org/people/jens.wiklander/linux-tee tags/optee-per-cpu-irq-for-v6.4
+ amlogic/drivers
+ https://git.kernel.org/pub/scm/linux/kernel/git/amlogic/linux tags/amlogic-drivers-for-v6.4
+ drivers/optee-load
+ https://git.linaro.org/people/jens.wiklander/linux-tee tags/optee-load-for-v6.4
+ apple/soc-drivers
+ https://github.com/AsahiLinux/linux tags/asahi-soc-rtkit-6.4
+ tegra/bus-drivers
+ git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux tags/tegra-for-6.4-arm-core
+ tegra/soc-drivers
+ git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux tags/tegra-for-6.4-soc
+ tegra/firmware-drivers
+ git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux tags/tegra-for-6.4-firmware
+ riscv/drivers
+ https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux tags/riscv-soc-for-v6.4
+ imx/drivers
+ git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux tags/imx-drivers-6.4
+ sunxi/drivers
+ https://git.kernel.org/pub/scm/linux/kernel/git/sunxi/linux tags/sunxi-drivers-for-6.4-1
+ renesas/drivers-2
+ git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel tags/renesas-drivers-for-v6.4-tag2
+ k3/drivers
+ git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux tags/ti-driver-soc-for-v6.4
+ qcom/drivers
+ https://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux tags/qcom-drivers-for-6.4
+ broadcom/drivers
+ https://github.com/Broadcom/stblinux tags/arm-soc/for-6.4/drivers
+ qcom/drivers-2
+ https://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux tags/qcom-drivers-for-6.4-2
+ patch
+ soc: ti: smartreflex: Simplify getting the opam_sr pointer
+ mediatek/soc-drivers
+ https://git.kernel.org/pub/scm/linux/kernel/git/matthias.bgg/linux tags/v6.3-next-soc
+ drivers/memory-2
+ https://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-mem-ctrl tags/memory-controller-drv-6.4-2
+ vexpress/drivers
+ git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux tags/vexpress-update-6.4
+ scmi/drivers
+ git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux tags/scmi-updates-6.4
+
+arm/defconfig
+
+arm/late
+
+soc/fixes
+
+arm/fixes
+ <no branch> (172fa6366c0c84eda31f1bc34e6c3e4698786215)
+ https://git.linaro.org/people/jens.wiklander/linux-tee tags/optee-fix-for-v6.3
+ <no branch> (f9d323e7c1724270d747657051099826744e91e7)
+ https://git.kernel.org/pub/scm/linux/kernel/git/amlogic/linux tags/amlogic-fixes-v6.3-rc
+ <no branch> (8671133082176d1388e20ac33d61cf7e3b05adf5)
+ https://git.linaro.org/people/jens.wiklander/linux-tee tags/tee-fix-for-v6.3
+ <no branch> (86d5b27b379256cd5d48974b4cd7ad03091eea6b)
+ git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux tags/imx-fixes-6.3-2
+ <no branch> (8056dc043d7f74d7675d413cb3dc4fa290609922)
+ https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux tags/riscv-dt-fixes-for-v6.3-final
+ patch
+ firmware/psci: demote suspend-mode warning to info level
+ <no branch> (60a655debd36e3278a46872accc1a51a54f94f02)
+ git://git.kernel.org/pub/scm/linux/kernel/git/mmind/linux-rockchip tags/v6.3-rockchip-dtsfixes1
+ <no branch> (75eab749e7aec0b7b515d7c50ed429ef4e1c5f3f)
+ https://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux tags/qcom-arm64-fixes-for-6.3-2
+
+soc/arm
+ patch
+ ARM: Convert to platform remove callback returning void
+ ARM: mmp: remove obsolete config USB_EHCI_MV_U2O
+ ARM: spear: remove obsolete config MACH_SPEAR600
+ ARM: mstar: remove unused config MACH_MERCURY
+ omap/cleanup
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v6.4/cleanup-signed
+ omap1/cleanup
+ git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap tags/omap-for-v6.4/omap1-signed
+ patch
+ ARM: oxnas: remove OXNAS support
+ samsung/soc
+ https://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux tags/samsung-soc-6.4
+ imx/soc
+ git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux tags/imx-soc-6.4
+ renesas/soc
+ git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel tags/renesas-arm-soc-for-v6.4-tag1
+ broadcom/soc
+ https://github.com/Broadcom/stblinux tags/arm-soc/for-6.4/soc
+ patch
+ ARM: pxa: Use of_property_read_bool() for boolean properties
+ soc: fsl: Use of_property_present() for testing DT property presence
+ ARM: mv78xx0: adjust init logic for ts-wxl to reflect single core dev
+ ARM: mv78xx0: set the correct driver for the i2c RTC
+ ARM: mv78xx0: add code to enable XOR and CRYPTO engines on mv78xx0
+ ARM: mv78xx0: fix entries for gpios, buttons and usb ports
+ mvebu/drivers
+ git://git.kernel.org/pub/scm/linux/kernel/git/gclement/mvebu tags/mvebu-arm64-6.4-1
+
+soc/defconfig
+ patch
+ arm64: virtconfig: Further shrink the config
+ arm64: defconfig: Enable CAN PHY transceiver driver
+ arm64: defconfig: Enable Virtio RNG driver as built in
+ renesas/defconfig
+ git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel tags/renesas-arm-defconfig-for-v6.4-tag1
+ patch
+ ARM: configs: remove oxnas_v6_defconfig
+ ARM: configs: Update U8500 defconfig
+ ARM: multi_v7_defconfig: Add OPTEE support
+ qcom/defconfig
+ https://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux tags/qcom-arm64-defconfig-for-6.4
+ k3/defconfig
+ git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux tags/ti-k3-config-for-v6.4
+ imx/defconfig
+ git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux tags/imx-defconfig-6.4
+ tegra/defconfig
+ git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux tags/tegra-for-6.4-arm64-defconfig
+ patch
+ arm64: defconfig: Enable crypto test module
+ arm64: defconfig: Enable security accelerator driver for TI K3 SoCs
+ arm64: defconfig: Enable TI TSCADC driver
+ arm64: defconfig: Enable TI ADC driver
+ arm64: defconfig: enable building the nvmem-reboot-mode module
+ ARM: config: Update Vexpress defconfig
+
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index 8dd6976ab0a7..434025331041 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -201,8 +201,8 @@
#size-cells = <0>;
};
- camera: camera {
- compatible = "samsung,fimc", "simple-bus";
+ camera: camera@11800000 {
+ compatible = "samsung,fimc";
status = "disabled";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index ff6ee4b2c31b..bfb04b31e11b 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -150,8 +150,6 @@
};
&camera {
- pinctrl-names = "default";
- pinctrl-0 = <>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index 8fe0d5d2be2d..c84af3d27c1c 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -197,9 +197,6 @@
&camera {
status = "okay";
-
- pinctrl-names = "default";
- pinctrl-0 = <>;
};
&cpu0 {
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 45ef7b7ba7e0..93ddbd4b0a18 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -122,8 +122,6 @@
&camera {
status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <>;
};
&clock {
diff --git a/arch/arm/boot/dts/s5pv210-pinctrl.dtsi b/arch/arm/boot/dts/s5pv210-pinctrl.dtsi
index 6d6daef9fb7a..ae0b4a423746 100644
--- a/arch/arm/boot/dts/s5pv210-pinctrl.dtsi
+++ b/arch/arm/boot/dts/s5pv210-pinctrl.dtsi
@@ -19,7 +19,7 @@
#include "s5pv210-pinctrl.h"
#define PIN_SLP(_pin, _mode, _pull) \
- _pin { \
+ pin- ## _pin { \
samsung,pins = #_pin; \
samsung,pin-con-pdn = <S5PV210_PIN_PDN_ ##_mode>; \
samsung,pin-pud-pdn = <S5PV210_PIN_PULL_ ##_pull>; \
diff --git a/arch/arm/boot/dts/s5pv210-smdkv210.dts b/arch/arm/boot/dts/s5pv210-smdkv210.dts
index fbae768d65e2..6e26c67e0a26 100644
--- a/arch/arm/boot/dts/s5pv210-smdkv210.dts
+++ b/arch/arm/boot/dts/s5pv210-smdkv210.dts
@@ -55,6 +55,14 @@
default-brightness-level = <6>;
pinctrl-names = "default";
pinctrl-0 = <&pwm3_out>;
+ power-supply = <&dc5v_reg>;
+ };
+
+ dc5v_reg: regulator-0 {
+ compatible = "regulator-fixed";
+ regulator-name = "DC5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
};
};
diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
index 1a9e4a96b2ff..faa3682ab5dd 100644
--- a/arch/arm/boot/dts/s5pv210.dtsi
+++ b/arch/arm/boot/dts/s5pv210.dtsi
@@ -401,7 +401,7 @@
status = "disabled";
};
- hsotg: hsotg@ec000000 {
+ hsotg: usb@ec000000 {
compatible = "samsung,s3c6400-hsotg";
reg = <0xec000000 0x20000>;
interrupt-parent = <&vic1>;
@@ -452,8 +452,8 @@
reg = <0xf1700000 0x10000>;
interrupt-parent = <&vic2>;
interrupts = <14>;
- clocks = <&clocks DOUT_MFC>, <&clocks CLK_MFC>;
- clock-names = "sclk_mfc", "mfc";
+ clocks = <&clocks CLK_MFC>, <&clocks DOUT_MFC>;
+ clock-names = "mfc", "sclk_mfc";
};
vic0: interrupt-controller@f2000000 {
@@ -547,10 +547,8 @@
status = "disabled";
};
- camera: camera {
- compatible = "samsung,fimc", "simple-bus";
- pinctrl-names = "default";
- pinctrl-0 = <>;
+ camera: camera@fa600000 {
+ compatible = "samsung,fimc";
clocks = <&clocks SCLK_CAM0>, <&clocks SCLK_CAM1>;
clock-names = "sclk_cam0", "sclk_cam1";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
index 3b88209bacea..ff1f9a1bcfcf 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
@@ -132,6 +132,7 @@
reg = <0x2c0f0000 0x1000>;
interrupts = <0 84 4>;
cache-level = <2>;
+ cache-unified;
};
pmu {
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 106049791500..df071a807610 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -202,11 +202,16 @@ static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
PMD_BIT_FUNC(wrprotect, |= L_PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
-PMD_BIT_FUNC(mkwrite, &= ~L_PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkdirty, |= L_PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkclean, &= ~L_PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
+static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+{
+ pmd_val(pmd) &= ~L_PMD_SECT_RDONLY;
+ return pmd;
+}
+
#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index a58ccbb406ad..39ad1ae1308d 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -227,7 +227,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}
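
For context: all three mkwrite conversions above follow the same shape. pte_mkwrite()/pmd_mkwrite() gain a vm_area_struct argument so that architectures with per-VMA write semantics (such as x86 shadow stacks) can consult the VMA; ARM and ARC simply ignore it. A kernel-context sketch of the caller side, mirroring the shape of mm's maybe_mkwrite() helper after this change (illustrative, not a standalone program):

	/* Generic mm code threads the VMA through to the arch hook. */
	static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
	{
		if (likely(vma->vm_flags & VM_WRITE))
			pte = pte_mkwrite(pte, vma);	/* arch hook now sees the VMA */
		return pte;
	}
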
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index e07f359254c3..9a3c9de5ac5e 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -681,7 +681,7 @@ asmlinkage void do_rseq_syscall(struct pt_regs *regs)
*/
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
-static_assert(NSIGSEGV == 9);
+static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 53be7ea6181b..9d2192156087 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -308,6 +308,29 @@ static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl,
return URC_OK;
}
+static unsigned long unwind_decode_uleb128(struct unwind_ctrl_block *ctrl)
+{
+ unsigned long bytes = 0;
+ unsigned long insn;
+ unsigned long result = 0;
+
+ /*
+ * unwind_get_byte() will advance `ctrl` one instruction at a time, so
+ * loop until we get an instruction byte where bit 7 is not set.
+ *
+ * Note: This decodes a maximum of 4 bytes to output 28 bits of data,
+ * where the maximum is 0xfffffff: that covers a vsp increment of
+ * 1073742336 bytes, hence it is sufficient for unwinding the stack.
+ */
+ do {
+ insn = unwind_get_byte(ctrl);
+ result |= (insn & 0x7f) << (bytes * 7);
+ bytes++;
+ } while (!!(insn & 0x80) && (bytes != sizeof(result)));
+
+ return result;
+}
+
/*
* Execute the current unwind instruction.
*/
@@ -361,7 +384,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
if (ret)
goto error;
} else if (insn == 0xb2) {
- unsigned long uleb128 = unwind_get_byte(ctrl);
+ unsigned long uleb128 = unwind_decode_uleb128(ctrl);
ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
} else {
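
Since the 0xb2 ("vsp = vsp + 0x204 + (uleb128 << 2)") opcode now consumes a full ULEB128 operand instead of a single byte, a small userspace sketch of the same decoding loop may help; it assumes a plain byte buffer in place of the unwinder's ctrl block:

	#include <stdio.h>

	/* Same loop as unwind_decode_uleb128(), capped at 4 bytes as on
	 * 32-bit ARM where sizeof(unsigned long) == 4. */
	static unsigned long decode_uleb128(const unsigned char *p)
	{
		unsigned long bytes = 0, result = 0, insn;

		do {
			insn = *p++;
			result |= (insn & 0x7f) << (bytes * 7);
			bytes++;
		} while ((insn & 0x80) && bytes != 4);

		return result;
	}

	int main(void)
	{
		const unsigned char buf[] = { 0x81, 0x01 };	/* 1 | (1 << 7) */

		/* Prints 129; an 0xb2 opcode with this operand bumps vsp by
		 * 0x204 + (129 << 2) = 0x408 bytes. */
		printf("%lu\n", decode_uleb128(buf));
		return 0;
	}
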
diff --git a/arch/arm/mach-s3c/Kconfig.s3c64xx b/arch/arm/mach-s3c/Kconfig.s3c64xx
index 01a7a8eec6e8..8f40af063ad6 100644
--- a/arch/arm/mach-s3c/Kconfig.s3c64xx
+++ b/arch/arm/mach-s3c/Kconfig.s3c64xx
@@ -69,11 +69,6 @@ config S3C64XX_SETUP_I2C1
help
Common setup code for i2c bus 1.
-config S3C64XX_SETUP_IDE
- bool
- help
- Common setup code for S3C64XX IDE.
-
config S3C64XX_SETUP_FB_24BPP
bool
help
@@ -110,7 +105,6 @@ config MACH_WLF_CRAGG_6410
select S3C64XX_DEV_SPI0
select S3C64XX_SETUP_FB_24BPP
select S3C64XX_SETUP_I2C1
- select S3C64XX_SETUP_IDE
select S3C64XX_SETUP_KEYPAD
select S3C64XX_SETUP_SDHCI
select S3C64XX_SETUP_SPI
diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c
index 67f72ca984b2..1956b095e699 100644
--- a/arch/arm/mach-sa1100/jornada720_ssp.c
+++ b/arch/arm/mach-sa1100/jornada720_ssp.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* arch/arm/mac-sa1100/jornada720_ssp.c
*
* Copyright (C) 2006/2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com>
@@ -26,6 +26,7 @@ static unsigned long jornada_ssp_flags;
/**
* jornada_ssp_reverse - reverses input byte
+ * @byte: input byte to reverse
*
* we need to reverse all data we receive from the mcu due to its physical location
* returns : 01110111 -> 11101110
@@ -46,6 +47,7 @@ EXPORT_SYMBOL(jornada_ssp_reverse);
/**
* jornada_ssp_byte - waits for ready ssp bus and sends byte
+ * @byte: input byte to transmit
*
* waits for fifo buffer to clear and then transmits, if it doesn't then we will
* timeout after <timeout> rounds. Needs mcu running before its called.
@@ -77,6 +79,7 @@ EXPORT_SYMBOL(jornada_ssp_byte);
/**
* jornada_ssp_inout - decide if input is command or trading byte
+ * @byte: input byte to send (may be %TXDUMMY)
*
* returns : (jornada_ssp_byte(byte)) on success
* : %-ETIMEDOUT on timeout failure
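
The reversal described in the jornada_ssp_reverse() comment (01110111 -> 11101110) is a plain bit-order mirror of each byte. A standalone sketch of the operation:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirror the bit order of one byte, e.g. 0x77 -> 0xee, as the
	 * jornada720 MCU link requires for every byte exchanged. */
	static uint8_t reverse_byte(uint8_t b)
	{
		uint8_t r = 0;
		int i;

		for (i = 0; i < 8; i++)
			if (b & (1u << i))
				r |= 1u << (7 - i);
		return r;
	}

	int main(void)
	{
		printf("0x%02x\n", reverse_byte(0x77));	/* 0xee */
		return 0;
	}
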
diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
index 029578072d8f..7b41537731a6 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi
+++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi
@@ -59,6 +59,7 @@
L2_0: l2-cache0 {
compatible = "cache";
cache-level = <2>;
+ cache-unified;
};
};
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
index ef68f5aae7dd..afdf954206f1 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
@@ -72,6 +72,7 @@
L2_0: l2-cache0 {
compatible = "cache";
cache-level = <2>;
+ cache-unified;
};
};
diff --git a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
index 796cd7d02eb5..7bdeb965f0a9 100644
--- a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
+++ b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
@@ -58,6 +58,7 @@
L2_0: l2-cache0 {
compatible = "cache";
cache-level = <2>;
+ cache-unified;
};
};
diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile
index f130165577a8..7114cbbd8713 100644
--- a/arch/arm64/boot/dts/renesas/Makefile
+++ b/arch/arm64/boot/dts/renesas/Makefile
@@ -79,10 +79,12 @@ dtb-$(CONFIG_ARCH_R9A07G043) += r9a07g043u11-smarc.dtb
dtb-$(CONFIG_ARCH_R9A07G043) += r9a07g043-smarc-pmod.dtbo
dtb-$(CONFIG_ARCH_R9A07G044) += r9a07g044c2-smarc.dtb
+dtb-$(CONFIG_ARCH_R9A07G044) += r9a07g044c2-smarc-cru-csi-ov5645.dtbo
dtb-$(CONFIG_ARCH_R9A07G044) += r9a07g044l2-smarc.dtb
dtb-$(CONFIG_ARCH_R9A07G044) += r9a07g044l2-smarc-cru-csi-ov5645.dtbo
dtb-$(CONFIG_ARCH_R9A07G054) += r9a07g054l2-smarc.dtb
+dtb-$(CONFIG_ARCH_R9A07G054) += r9a07g054l2-smarc-cru-csi-ov5645.dtbo
dtb-$(CONFIG_ARCH_R9A09G011) += r9a09g011-v2mevk2.dtb
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
index bf587a14ec19..4e67a0356497 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
@@ -943,6 +943,56 @@
status = "disabled";
};
+ pwm0: pwm@e6e30000 {
+ compatible = "renesas,pwm-r8a779a0", "renesas,pwm-rcar";
+ reg = <0 0xe6e30000 0 0x10>;
+ #pwm-cells = <2>;
+ clocks = <&cpg CPG_MOD 628>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 628>;
+ status = "disabled";
+ };
+
+ pwm1: pwm@e6e31000 {
+ compatible = "renesas,pwm-r8a779a0", "renesas,pwm-rcar";
+ reg = <0 0xe6e31000 0 0x10>;
+ #pwm-cells = <2>;
+ clocks = <&cpg CPG_MOD 628>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 628>;
+ status = "disabled";
+ };
+
+ pwm2: pwm@e6e32000 {
+ compatible = "renesas,pwm-r8a779a0", "renesas,pwm-rcar";
+ reg = <0 0xe6e32000 0 0x10>;
+ #pwm-cells = <2>;
+ clocks = <&cpg CPG_MOD 628>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 628>;
+ status = "disabled";
+ };
+
+ pwm3: pwm@e6e33000 {
+ compatible = "renesas,pwm-r8a779a0", "renesas,pwm-rcar";
+ reg = <0 0xe6e33000 0 0x10>;
+ #pwm-cells = <2>;
+ clocks = <&cpg CPG_MOD 628>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 628>;
+ status = "disabled";
+ };
+
+ pwm4: pwm@e6e34000 {
+ compatible = "renesas,pwm-r8a779a0", "renesas,pwm-rcar";
+ reg = <0 0xe6e34000 0 0x10>;
+ #pwm-cells = <2>;
+ clocks = <&cpg CPG_MOD 628>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 628>;
+ status = "disabled";
+ };
+
scif0: serial@e6e60000 {
compatible = "renesas,scif-r8a779a0",
"renesas,rcar-gen4-scif", "renesas,scif";
diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
index 1315be5167b9..232910e07444 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
@@ -174,6 +174,76 @@
#size-cells = <2>;
ranges;
+ mtu3: timer@10001200 {
+ compatible = "renesas,r9a07g044-mtu3",
+ "renesas,rz-mtu3";
+ reg = <0 0x10001200 0 0xb00>;
+ interrupts = <GIC_SPI 170 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 171 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 172 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 173 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 174 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 175 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 176 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 177 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 178 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 179 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 180 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 181 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 182 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 183 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 184 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 185 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 186 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 187 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 188 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 189 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 190 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 191 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 192 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 193 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 194 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 195 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 196 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 198 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 199 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 200 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 201 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 202 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 203 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 204 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 205 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 206 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 207 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 208 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 209 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 210 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 211 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 212 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 213 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0",
+ "tgiv0", "tgie0", "tgif0",
+ "tgia1", "tgib1", "tgiv1", "tgiu1",
+ "tgia2", "tgib2", "tgiv2", "tgiu2",
+ "tgia3", "tgib3", "tgic3", "tgid3",
+ "tgiv3",
+ "tgia4", "tgib4", "tgic4", "tgid4",
+ "tgiv4",
+ "tgiu5", "tgiv5", "tgiw5",
+ "tgia6", "tgib6", "tgic6", "tgid6",
+ "tgiv6",
+ "tgia7", "tgib7", "tgic7", "tgid7",
+ "tgiv7",
+ "tgia8", "tgib8", "tgic8", "tgid8",
+ "tgiv8", "tgiu8";
+ clocks = <&cpg CPG_MOD R9A07G044_MTU_X_MCK_MTU3>;
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G044_MTU_X_PRESET_MTU3>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
ssi0: ssi@10049c00 {
compatible = "renesas,r9a07g044-ssi",
"renesas,rz-ssi";
@@ -697,6 +767,59 @@
};
};
+ dsi: dsi@10850000 {
+ compatible = "renesas,r9a07g044-mipi-dsi",
+ "renesas,rzg2l-mipi-dsi";
+ reg = <0 0x10850000 0 0x20000>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "seq0", "seq1", "vin1", "rcv",
+ "ferr", "ppi", "debug";
+ clocks = <&cpg CPG_MOD R9A07G044_MIPI_DSI_PLLCLK>,
+ <&cpg CPG_MOD R9A07G044_MIPI_DSI_SYSCLK>,
+ <&cpg CPG_MOD R9A07G044_MIPI_DSI_ACLK>,
+ <&cpg CPG_MOD R9A07G044_MIPI_DSI_PCLK>,
+ <&cpg CPG_MOD R9A07G044_MIPI_DSI_VCLK>,
+ <&cpg CPG_MOD R9A07G044_MIPI_DSI_LPCLK>;
+ clock-names = "pllclk", "sysclk", "aclk", "pclk", "vclk", "lpclk";
+ resets = <&cpg R9A07G044_MIPI_DSI_CMN_RSTB>,
+ <&cpg R9A07G044_MIPI_DSI_ARESET_N>,
+ <&cpg R9A07G044_MIPI_DSI_PRESET_N>;
+ reset-names = "rst", "arst", "prst";
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ vspd: vsp@10870000 {
+ compatible = "renesas,r9a07g044-vsp2";
+ reg = <0 0x10870000 0 0x10000>;
+ interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD R9A07G044_LCDC_CLK_A>,
+ <&cpg CPG_MOD R9A07G044_LCDC_CLK_P>,
+ <&cpg CPG_MOD R9A07G044_LCDC_CLK_D>;
+ clock-names = "aclk", "pclk", "vclk";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G044_LCDC_RESET_N>;
+ renesas,fcp = <&fcpvd>;
+ };
+
+ fcpvd: fcp@10880000 {
+ compatible = "renesas,r9a07g044-fcpvd",
+ "renesas,fcpv";
+ reg = <0 0x10880000 0 0x10000>;
+ clocks = <&cpg CPG_MOD R9A07G044_LCDC_CLK_A>,
+ <&cpg CPG_MOD R9A07G044_LCDC_CLK_P>,
+ <&cpg CPG_MOD R9A07G044_LCDC_CLK_D>;
+ clock-names = "aclk", "pclk", "vclk";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G044_LCDC_RESET_N>;
+ };
+
cpg: clock-controller@11010000 {
compatible = "renesas,r9a07g044-cpg";
reg = <0 0x11010000 0 0x10000>;
diff --git a/arch/arm64/boot/dts/renesas/r9a07g044c2-smarc-cru-csi-ov5645.dtso b/arch/arm64/boot/dts/renesas/r9a07g044c2-smarc-cru-csi-ov5645.dtso
new file mode 100644
index 000000000000..f983bdd3ea30
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a07g044c2-smarc-cru-csi-ov5645.dtso
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree overlay for the RZ/G2LC SMARC EVK with
+ * OV5645 camera connected to CSI and CRU enabled.
+ *
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
+
+#define OV5645_PARENT_I2C i2c0
+#include "rz-smarc-cru-csi-ov5645.dtsi"
+
+&ov5645 {
+ enable-gpios = <&pinctrl RZG2L_GPIO(0, 1) GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pinctrl RZG2L_GPIO(5, 2) GPIO_ACTIVE_LOW>;
+};
diff --git a/arch/arm64/boot/dts/renesas/r9a07g044l2-smarc-cru-csi-ov5645.dtso b/arch/arm64/boot/dts/renesas/r9a07g044l2-smarc-cru-csi-ov5645.dtso
index d834bff9bda2..736c1e688cc8 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g044l2-smarc-cru-csi-ov5645.dtso
+++ b/arch/arm64/boot/dts/renesas/r9a07g044l2-smarc-cru-csi-ov5645.dtso
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Device Tree overlay for the RZ/G2L SMARC EVK with OV5645 camera
+ * Device Tree overlay for the RZ/{G2L, V2L} SMARC EVK with OV5645 camera
* connected to CSI and CRU enabled.
*
* Copyright (C) 2023 Renesas Electronics Corp.
diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
index cc11e5855d62..2eba3a8a100d 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
@@ -174,6 +174,76 @@
#size-cells = <2>;
ranges;
+ mtu3: timer@10001200 {
+ compatible = "renesas,r9a07g054-mtu3",
+ "renesas,rz-mtu3";
+ reg = <0 0x10001200 0 0xb00>;
+ interrupts = <GIC_SPI 170 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 171 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 172 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 173 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 174 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 175 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 176 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 177 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 178 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 179 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 180 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 181 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 182 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 183 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 184 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 185 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 186 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 187 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 188 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 189 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 190 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 191 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 192 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 193 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 194 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 195 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 196 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 198 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 199 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 200 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 201 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 202 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 203 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 204 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 205 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 206 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 207 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 208 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 209 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 210 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 211 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 212 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 213 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "tgia0", "tgib0", "tgic0", "tgid0",
+ "tgiv0", "tgie0", "tgif0",
+ "tgia1", "tgib1", "tgiv1", "tgiu1",
+ "tgia2", "tgib2", "tgiv2", "tgiu2",
+ "tgia3", "tgib3", "tgic3", "tgid3",
+ "tgiv3",
+ "tgia4", "tgib4", "tgic4", "tgid4",
+ "tgiv4",
+ "tgiu5", "tgiv5", "tgiw5",
+ "tgia6", "tgib6", "tgic6", "tgid6",
+ "tgiv6",
+ "tgia7", "tgib7", "tgic7", "tgid7",
+ "tgiv7",
+ "tgia8", "tgib8", "tgic8", "tgid8",
+ "tgiv8", "tgiu8";
+ clocks = <&cpg CPG_MOD R9A07G054_MTU_X_MCK_MTU3>;
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G054_MTU_X_PRESET_MTU3>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
ssi0: ssi@10049c00 {
compatible = "renesas,r9a07g054-ssi",
"renesas,rz-ssi";
@@ -623,6 +693,139 @@
status = "disabled";
};
+ cru: video@10830000 {
+ compatible = "renesas,r9a07g054-cru", "renesas,rzg2l-cru";
+ reg = <0 0x10830000 0 0x400>;
+ clocks = <&cpg CPG_MOD R9A07G054_CRU_VCLK>,
+ <&cpg CPG_MOD R9A07G054_CRU_PCLK>,
+ <&cpg CPG_MOD R9A07G054_CRU_ACLK>;
+ clock-names = "video", "apb", "axi";
+ interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "image_conv", "image_conv_err", "axi_mst_err";
+ resets = <&cpg R9A07G054_CRU_PRESETN>,
+ <&cpg R9A07G054_CRU_ARESETN>;
+ reset-names = "presetn", "aresetn";
+ power-domains = <&cpg>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <0>;
+ cruparallel: endpoint@0 {
+ reg = <0>;
+ };
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <1>;
+ crucsi2: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&csi2cru>;
+ };
+ };
+ };
+ };
+
+ csi2: csi2@10830400 {
+ compatible = "renesas,r9a07g054-csi2", "renesas,rzg2l-csi2";
+ reg = <0 0x10830400 0 0xfc00>;
+ interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD R9A07G054_CRU_SYSCLK>,
+ <&cpg CPG_MOD R9A07G054_CRU_VCLK>,
+ <&cpg CPG_MOD R9A07G054_CRU_PCLK>;
+ clock-names = "system", "video", "apb";
+ resets = <&cpg R9A07G054_CRU_PRESETN>,
+ <&cpg R9A07G054_CRU_CMN_RSTB>;
+ reset-names = "presetn", "cmn-rstb";
+ power-domains = <&cpg>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ };
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ csi2cru: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&crucsi2>;
+ };
+ };
+ };
+ };
+
+ dsi: dsi@10850000 {
+ compatible = "renesas,r9a07g054-mipi-dsi",
+ "renesas,rzg2l-mipi-dsi";
+ reg = <0 0x10850000 0 0x20000>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "seq0", "seq1", "vin1", "rcv",
+ "ferr", "ppi", "debug";
+ clocks = <&cpg CPG_MOD R9A07G054_MIPI_DSI_PLLCLK>,
+ <&cpg CPG_MOD R9A07G054_MIPI_DSI_SYSCLK>,
+ <&cpg CPG_MOD R9A07G054_MIPI_DSI_ACLK>,
+ <&cpg CPG_MOD R9A07G054_MIPI_DSI_PCLK>,
+ <&cpg CPG_MOD R9A07G054_MIPI_DSI_VCLK>,
+ <&cpg CPG_MOD R9A07G054_MIPI_DSI_LPCLK>;
+ clock-names = "pllclk", "sysclk", "aclk", "pclk", "vclk", "lpclk";
+ resets = <&cpg R9A07G054_MIPI_DSI_CMN_RSTB>,
+ <&cpg R9A07G054_MIPI_DSI_ARESET_N>,
+ <&cpg R9A07G054_MIPI_DSI_PRESET_N>;
+ reset-names = "rst", "arst", "prst";
+ power-domains = <&cpg>;
+ status = "disabled";
+ };
+
+ vspd: vsp@10870000 {
+ compatible = "renesas,r9a07g054-vsp2",
+ "renesas,r9a07g044-vsp2";
+ reg = <0 0x10870000 0 0x10000>;
+ interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD R9A07G054_LCDC_CLK_A>,
+ <&cpg CPG_MOD R9A07G054_LCDC_CLK_P>,
+ <&cpg CPG_MOD R9A07G054_LCDC_CLK_D>;
+ clock-names = "aclk", "pclk", "vclk";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G054_LCDC_RESET_N>;
+ renesas,fcp = <&fcpvd>;
+ };
+
+ fcpvd: fcp@10880000 {
+ compatible = "renesas,r9a07g054-fcpvd",
+ "renesas,fcpv";
+ reg = <0 0x10880000 0 0x10000>;
+ clocks = <&cpg CPG_MOD R9A07G054_LCDC_CLK_A>,
+ <&cpg CPG_MOD R9A07G054_LCDC_CLK_P>,
+ <&cpg CPG_MOD R9A07G054_LCDC_CLK_D>;
+ clock-names = "aclk", "pclk", "vclk";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G054_LCDC_RESET_N>;
+ };
+
cpg: clock-controller@11010000 {
compatible = "renesas,r9a07g054-cpg";
reg = <0 0x11010000 0 0x10000>;
diff --git a/arch/arm64/boot/dts/renesas/r9a07g054l2-smarc-cru-csi-ov5645.dtso b/arch/arm64/boot/dts/renesas/r9a07g054l2-smarc-cru-csi-ov5645.dtso
new file mode 120000
index 000000000000..0f175341d3ed
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r9a07g054l2-smarc-cru-csi-ov5645.dtso
@@ -0,0 +1 @@
+r9a07g044l2-smarc-cru-csi-ov5645.dtso
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi
index e180a955b6ac..2a158a954b2f 100644
--- a/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2l-smarc.dtsi
@@ -16,12 +16,91 @@
serial1 = &scif2;
i2c3 = &i2c3;
};
+
+ osc1: cec-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <12000000>;
+ };
+
+ hdmi-out {
+ compatible = "hdmi-connector";
+ type = "d";
+
+ port {
+ hdmi_con_out: endpoint {
+ remote-endpoint = <&adv7535_out>;
+ };
+ };
+ };
};
&cpu_dai {
sound-dai = <&ssi0>;
};
+&dsi {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi0_out: endpoint {
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&adv7535_in>;
+ };
+ };
+ };
+};
+
+&i2c1 {
+ adv7535: hdmi@3d {
+ compatible = "adi,adv7535";
+ reg = <0x3d>;
+
+ interrupt-parent = <&pinctrl>;
+ interrupts = <RZG2L_GPIO(2, 1) IRQ_TYPE_EDGE_FALLING>;
+ clocks = <&osc1>;
+ clock-names = "cec";
+ avdd-supply = <&reg_1p8v>;
+ dvdd-supply = <&reg_1p8v>;
+ pvdd-supply = <&reg_1p8v>;
+ a2vdd-supply = <&reg_1p8v>;
+ v3p3-supply = <&reg_3p3v>;
+ v1p2-supply = <&reg_1p8v>;
+
+ adi,dsi-lanes = <4>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ adv7535_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ adv7535_out: endpoint {
+ remote-endpoint = <&hdmi_con_out>;
+ };
+ };
+ };
+ };
+};
+
&i2c3 {
pinctrl-0 = <&i2c3_pins>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi b/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi
index b6bd27196d88..6818fd49b2be 100644
--- a/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg2lc-smarc.dtsi
@@ -17,6 +17,23 @@
serial1 = &scif1;
i2c2 = &i2c2;
};
+
+ osc1: cec-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <12000000>;
+ };
+
+ hdmi-out {
+ compatible = "hdmi-connector";
+ type = "d";
+
+ port {
+ hdmi_con_out: endpoint {
+ remote-endpoint = <&adv7535_out>;
+ };
+ };
+ };
};
#if (SW_SCIF_CAN || SW_RSPI_CAN)
@@ -36,6 +53,68 @@
sound-dai = <&ssi0>;
};
+&dsi {
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi0_in: endpoint {
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi0_out: endpoint {
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&adv7535_in>;
+ };
+ };
+ };
+};
+
+&i2c1 {
+ adv7535: hdmi@3d {
+ compatible = "adi,adv7535";
+ reg = <0x3d>;
+
+ interrupt-parent = <&pinctrl>;
+ interrupts = <RZG2L_GPIO(43, 1) IRQ_TYPE_EDGE_FALLING>;
+ clocks = <&osc1>;
+ clock-names = "cec";
+ avdd-supply = <&reg_1p8v>;
+ dvdd-supply = <&reg_1p8v>;
+ pvdd-supply = <&reg_1p8v>;
+ a2vdd-supply = <&reg_1p8v>;
+ v3p3-supply = <&reg_3p3v>;
+ v1p2-supply = <&reg_1p8v>;
+
+ adi,dsi-lanes = <4>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ adv7535_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ adv7535_out: endpoint {
+ remote-endpoint = <&hdmi_con_out>;
+ };
+ };
+ };
+ };
+};
+
&i2c2 {
pinctrl-0 = <&i2c2_pins>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index 2d585bbb8f3a..1cf105d7407f 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -85,6 +85,8 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-box-demo.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3566-lubancat-1.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-bpi-r2-pro.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-evb1-v10.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-fastrhino-r66s.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-fastrhino-r68s.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-lubancat-2.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-nanopi-r5c.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3568-nanopi-r5s.dtb
diff --git a/arch/arm64/boot/dts/rockchip/rk3308.dtsi b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
index dd228a256a32..2ae4bb7d5e62 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
@@ -97,6 +97,7 @@
l2: l2-cache {
compatible = "cache";
cache-level = <2>;
+ cache-unified;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index f69a38f42d2d..0a27fa5271f5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -37,7 +37,8 @@
vin-supply = <&vcc_io>;
};
- vcc_host_5v: vcc-host-5v-regulator {
+ /* Common enable line for all of the rails mentioned in the labels */
+ vcc_host_5v: vcc_host1_5v: vcc_otg_5v: vcc-host-5v-regulator {
compatible = "regulator-fixed";
gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";
@@ -48,17 +49,6 @@
vin-supply = <&vcc_sys>;
};
- vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator {
- compatible = "regulator-fixed";
- gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
- pinctrl-names = "default";
- pinctrl-0 = <&usb20_host_drv>;
- regulator-name = "vcc_host1_5v";
- regulator-always-on;
- regulator-boot-on;
- vin-supply = <&vcc_sys>;
- };
-
vcc_sys: vcc-sys {
compatible = "regulator-fixed";
regulator-name = "vcc_sys";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 6d7a7bf72ac7..e729e7a22b23 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -103,6 +103,7 @@
l2: l2-cache0 {
compatible = "cache";
cache-level = <2>;
+ cache-unified;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts b/arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts
index 263ce40770dd..cddf6cd2fecb 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-soquartz-cm4.dts
@@ -28,6 +28,16 @@
regulator-max-microvolt = <5000000>;
vin-supply = <&vcc12v_dcin>;
};
+
+ vcc_sd_pwr: vcc-sd-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sd_pwr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc3v3_sys>;
+ };
};
/* phy for pcie */
@@ -130,13 +140,7 @@
};
&sdmmc0 {
- vmmc-supply = <&sdmmc_pwr>;
- status = "okay";
-};
-
-&sdmmc_pwr {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
+ vmmc-supply = <&vcc_sd_pwr>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
index 102e448bc026..31aa2b8efe39 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
@@ -104,16 +104,6 @@
regulator-max-microvolt = <3300000>;
vin-supply = <&vcc5v0_sys>;
};
-
- sdmmc_pwr: sdmmc-pwr-regulator {
- compatible = "regulator-fixed";
- enable-active-high;
- gpio = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&sdmmc_pwr_h>;
- regulator-name = "sdmmc_pwr";
- status = "disabled";
- };
};
&cpu0 {
@@ -155,6 +145,19 @@
status = "disabled";
};
+&gpio0 {
+ nextrst-hog {
+ gpio-hog;
+ /*
+ * GPIO_ACTIVE_LOW + output-low here means that the pin is set
+ * to high, because output-low decides the value pre-inversion.
+ */
+ gpios = <RK_PA5 GPIO_ACTIVE_LOW>;
+ line-name = "nEXTRST";
+ output-low;
+ };
+};
+
&gpu {
mali-supply = <&vdd_gpu>;
status = "okay";
@@ -538,12 +541,6 @@
rockchip,pins = <2 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
-
- sdmmc-pwr {
- sdmmc_pwr_h: sdmmc-pwr-h {
- rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
- };
- };
};
&pmu_io_domains {
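
The nEXTRST hog comment above is worth unpacking: output-low/output-high set the *logical* value of the line, and GPIO_ACTIVE_LOW inverts that logical value at the pin. A tiny standalone model of the polarity handling (hypothetical helper, not gpiolib code):

	#include <stdbool.h>
	#include <stdio.h>

	/* Logical-to-physical mapping: active-low lines invert at the pin. */
	static bool physical_level(bool logical, bool active_low)
	{
		return active_low ? !logical : logical;
	}

	int main(void)
	{
		/* "output-low" (logical 0) on a GPIO_ACTIVE_LOW hog drives
		 * the pin high, which is what nEXTRST needs. */
		printf("%d\n", physical_level(false, true));	/* prints 1 */
		return 0;
	}
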
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts
new file mode 100644
index 000000000000..58ab7e9971db
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dts
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+
+#include "rk3568-fastrhino-r66s.dtsi"
+
+/ {
+ model = "Lunzn FastRhino R66S";
+ compatible = "lunzn,fastrhino-r66s", "rockchip,rk3568";
+
+ aliases {
+ mmc0 = &sdmmc0;
+ };
+};
+
+&sdmmc0 {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ disable-wp;
+ max-frequency = <150000000>;
+ no-sdio;
+ no-mmc;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_bus4 &sdmmc0_clk &sdmmc0_cmd &sdmmc0_det>;
+ vmmc-supply = <&vcc3v3_sd>;
+ vqmmc-supply = <&vccio_sd>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi
new file mode 100644
index 000000000000..25e205632a68
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+#include <dt-bindings/soc/rockchip,vop2.h>
+#include "rk3568.dtsi"
+
+/ {
+ chosen: chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&reset_button_pin>;
+
+ button-reset {
+ debounce-interval = <50>;
+ gpios = <&gpio0 RK_PB6 GPIO_ACTIVE_LOW>;
+ label = "reset";
+ linux,code = <KEY_RESTART>;
+ };
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&status_led_pin>;
+
+ status_led: led-status {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_STATUS;
+ gpios = <&gpio0 RK_PC0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ dc_12v: dc-12v-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "dc_12v";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ vcc3v3_pcie: vcc3v3-pcie-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_pcie";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc5v0_sys>;
+ };
+
+ vcc3v3_sys: vcc3v3-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&dc_12v>;
+ };
+
+ vcc5v0_sys: vcc5v0-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&dc_12v>;
+ };
+
+ vcc5v0_usb_host: vcc5v0-usb-host-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_usb_host";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ vcc5v0_usb_otg: vcc5v0-usb-otg-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PA5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_usb_otg_en>;
+ regulator-name = "vcc5v0_usb_otg";
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&combphy0 {
+ status = "okay";
+};
+
+&combphy1 {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu1 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_cpu>;
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ vdd_cpu: regulator@1c {
+ compatible = "tcs,tcs4525";
+ reg = <0x1c>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-ramp-delay = <2300>;
+ vin-supply = <&vcc5v0_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ rk809: pmic@20 {
+ compatible = "rockchip,rk809";
+ reg = <0x20>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA3 IRQ_TYPE_LEVEL_LOW>;
+ #clock-cells = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int>;
+ rockchip,system-power-controller;
+ vcc1-supply = <&vcc3v3_sys>;
+ vcc2-supply = <&vcc3v3_sys>;
+ vcc3-supply = <&vcc3v3_sys>;
+ vcc4-supply = <&vcc3v3_sys>;
+ vcc5-supply = <&vcc3v3_sys>;
+ vcc6-supply = <&vcc3v3_sys>;
+ vcc7-supply = <&vcc3v3_sys>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc3v3_sys>;
+ wakeup-source;
+
+ regulators {
+ vdd_logic: DCDC_REG1 {
+ regulator-name = "vdd_logic";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-init-microvolt = <900000>;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: DCDC_REG2 {
+ regulator-name = "vdd_gpu";
+ regulator-always-on;
+ regulator-init-microvolt = <900000>;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-initial-mode = <0x2>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vdd_npu: DCDC_REG4 {
+ regulator-name = "vdd_npu";
+ regulator-init-microvolt = <900000>;
+ regulator-initial-mode = <0x2>;
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <6001>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v8: DCDC_REG5 {
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_image: LDO_REG1 {
+ regulator-name = "vdda0v9_image";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <950000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda_0v9: LDO_REG2 {
+ regulator-name = "vdda_0v9";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdda0v9_pmu: LDO_REG3 {
+ regulator-name = "vdda0v9_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <900000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <900000>;
+ };
+ };
+
+ vccio_acodec: LDO_REG4 {
+ regulator-name = "vccio_acodec";
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vccio_sd: LDO_REG5 {
+ regulator-name = "vccio_sd";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_pmu: LDO_REG6 {
+ regulator-name = "vcc3v3_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3300000>;
+ };
+ };
+
+ vcca_1v8: LDO_REG7 {
+ regulator-name = "vcca_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcca1v8_pmu: LDO_REG8 {
+ regulator-name = "vcca1v8_pmu";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcca1v8_image: LDO_REG9 {
+ regulator-name = "vcca1v8_image";
+ regulator-init-microvolt = <950000>;
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-suspend-microvolt = <950000>;
+ };
+ };
+
+ vcc_3v3: SWITCH_REG1 {
+ regulator-name = "vcc_3v3";
+ regulator-always-on;
+ regulator-boot-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_sd: SWITCH_REG2 {
+ regulator-name = "vcc3v3_sd";
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+};
+
+&pcie30phy {
+ data-lanes = <1 2>;
+ status = "okay";
+};
+
+&pcie3x1 {
+ num-lanes = <1>;
+ reset-gpios = <&gpio0 RK_PC3 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie>;
+ status = "okay";
+};
+
+&pcie3x2 {
+ num-lanes = <1>;
+ reset-gpios = <&gpio0 RK_PC6 GPIO_ACTIVE_HIGH>;
+ vpcie3v3-supply = <&vcc3v3_pcie>;
+ status = "okay";
+};
+
+&pinctrl {
+ gpio-leds {
+ status_led_pin: status-led-pin {
+ rockchip,pins = <0 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ pmic {
+ pmic_int: pmic-int {
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ rockchip-key {
+ reset_button_pin: reset-button-pin {
+ rockchip,pins = <0 RK_PB6 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ usb {
+ vcc5v0_usb_otg_en: vcc5v0-usb-otg-en {
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&pmu_io_domains {
+ pmuio1-supply = <&vcc3v3_pmu>;
+ pmuio2-supply = <&vcc3v3_pmu>;
+ vccio1-supply = <&vccio_acodec>;
+ vccio3-supply = <&vccio_sd>;
+ vccio4-supply = <&vcc_1v8>;
+ vccio5-supply = <&vcc_3v3>;
+ vccio6-supply = <&vcc_1v8>;
+ vccio7-supply = <&vcc_3v3>;
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcca_1v8>;
+ status = "okay";
+};
+
+&tsadc {
+ rockchip,hw-tshut-mode = <1>;
+ rockchip,hw-tshut-polarity = <0>;
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host0_xhci {
+ dr_mode = "host";
+ extcon = <&usb2phy0>;
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usb_host1_xhci {
+ status = "okay";
+};
+
+&usb2phy0 {
+ status = "okay";
+};
+
+&usb2phy0_host {
+ phy-supply = <&vcc5v0_usb_host>;
+ status = "okay";
+};
+
+&usb2phy0_otg {
+ phy-supply = <&vcc5v0_usb_otg>;
+ status = "okay";
+};
+
+&vop {
+ assigned-clocks = <&cru DCLK_VOP0>, <&cru DCLK_VOP1>;
+ assigned-clock-parents = <&pmucru PLL_HPLL>, <&cru PLL_VPLL>;
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts
new file mode 100644
index 000000000000..e1fe5e442689
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r68s.dts
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+
+#include "rk3568-fastrhino-r66s.dtsi"
+
+/ {
+ model = "Lunzn FastRhino R68S";
+ compatible = "lunzn,fastrhino-r68s", "rockchip,rk3568";
+
+ aliases {
+ ethernet0 = &gmac0;
+ ethernet1 = &gmac1;
+ mmc0 = &sdhci;
+ };
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+
+ button-recovery {
+ label = "Recovery";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <1750>;
+ };
+ };
+};
+
+&gmac0 {
+ assigned-clocks = <&cru SCLK_GMAC0_RX_TX>, <&cru SCLK_GMAC0>;
+ assigned-clock-parents = <&cru SCLK_GMAC0_RGMII_SPEED>;
+ assigned-clock-rates = <0>, <125000000>;
+ clock_in_out = "output";
+ phy-handle = <&rgmii_phy0>;
+ phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac0_miim
+ &gmac0_tx_bus2
+ &gmac0_rx_bus2
+ &gmac0_rgmii_clk
+ &gmac0_rgmii_bus>;
+ snps,reset-gpio = <&gpio0 RK_PB0 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ /* Reset time is 15ms, 50ms for rtl8211f */
+ snps,reset-delays-us = <0 15000 50000>;
+ tx_delay = <0x3c>;
+ rx_delay = <0x2f>;
+ status = "okay";
+};
+
+&gmac1 {
+ assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1>;
+ assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>;
+ assigned-clock-rates = <0>, <125000000>;
+ clock_in_out = "output";
+ phy-handle = <&rgmii_phy1>;
+ phy-mode = "rgmii-id";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gmac1m1_miim
+ &gmac1m1_tx_bus2
+ &gmac1m1_rx_bus2
+ &gmac1m1_rgmii_clk
+ &gmac1m1_rgmii_bus>;
+ snps,reset-gpio = <&gpio0 RK_PB1 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ /* Reset time is 15ms, 50ms for rtl8211f */
+ snps,reset-delays-us = <0 15000 50000>;
+ tx_delay = <0x4f>;
+ rx_delay = <0x26>;
+ status = "okay";
+};
+
+&mdio0 {
+ rgmii_phy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ pinctrl-0 = <&eth_phy0_reset_pin>;
+ pinctrl-names = "default";
+ };
+};
+
+&mdio1 {
+ rgmii_phy1: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ pinctrl-0 = <&eth_phy1_reset_pin>;
+ pinctrl-names = "default";
+ };
+};
+
+&pinctrl {
+ gmac0 {
+ eth_phy0_reset_pin: eth-phy0-reset-pin {
+ rockchip,pins = <1 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ gmac1 {
+ eth_phy1_reset_pin: eth-phy1-reset-pin {
+ rockchip,pins = <1 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&sdhci {
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_bus8 &emmc_clk &emmc_cmd &emmc_datastrobe>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
index 657c019d27fa..63be894a8999 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
@@ -229,6 +229,7 @@
cache-line-size = <64>;
cache-sets = <512>;
cache-level = <2>;
+ cache-unified;
next-level-cache = <&l3_cache>;
};
@@ -238,6 +239,7 @@
cache-line-size = <64>;
cache-sets = <512>;
cache-level = <2>;
+ cache-unified;
next-level-cache = <&l3_cache>;
};
@@ -247,6 +249,7 @@
cache-line-size = <64>;
cache-sets = <512>;
cache-level = <2>;
+ cache-unified;
next-level-cache = <&l3_cache>;
};
@@ -256,6 +259,7 @@
cache-line-size = <64>;
cache-sets = <512>;
cache-level = <2>;
+ cache-unified;
next-level-cache = <&l3_cache>;
};
@@ -265,6 +269,7 @@
cache-line-size = <64>;
cache-sets = <1024>;
cache-level = <2>;
+ cache-unified;
next-level-cache = <&l3_cache>;
};
@@ -274,6 +279,7 @@
cache-line-size = <64>;
cache-sets = <1024>;
cache-level = <2>;
+ cache-unified;
next-level-cache = <&l3_cache>;
};
@@ -283,6 +289,7 @@
cache-line-size = <64>;
cache-sets = <1024>;
cache-level = <2>;
+ cache-unified;
next-level-cache = <&l3_cache>;
};
@@ -292,6 +299,7 @@
cache-line-size = <64>;
cache-sets = <1024>;
cache-level = <2>;
+ cache-unified;
next-level-cache = <&l3_cache>;
};
@@ -301,6 +309,7 @@
cache-line-size = <64>;
cache-sets = <4096>;
cache-level = <3>;
+ cache-unified;
};
};
@@ -1300,7 +1309,24 @@
mbi-alias = <0x0 0xfe610000>;
mbi-ranges = <424 56>;
msi-controller;
+ ranges;
+ #address-cells = <2>;
#interrupt-cells = <4>;
+ #size-cells = <2>;
+
+ its0: msi-controller@fe640000 {
+ compatible = "arm,gic-v3-its";
+ reg = <0x0 0xfe640000 0x0 0x20000>;
+ msi-controller;
+ #msi-cells = <1>;
+ };
+
+ its1: msi-controller@fe660000 {
+ compatible = "arm,gic-v3-its";
+ reg = <0x0 0xfe660000 0x0 0x20000>;
+ msi-controller;
+ #msi-cells = <1>;
+ };
ppi-partitions {
ppi_partition0: interrupt-partition-0 {
@@ -1400,6 +1426,14 @@
status = "disabled";
};
+ timer0: timer@feae0000 {
+ compatible = "rockchip,rk3588-timer", "rockchip,rk3288-timer";
+ reg = <0x0 0xfeae0000 0x0 0x20>;
+ interrupts = <GIC_SPI 289 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&cru PCLK_BUSTIMER0>, <&cru CLK_BUSTIMER0>;
+ clock-names = "pclk", "timer";
+ };
+
wdt: watchdog@feaf0000 {
compatible = "rockchip,rk3588-wdt", "snps,dw-wdt";
reg = <0x0 0xfeaf0000 0x0 0x100>;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
index 3e9979ab60bb..5e7e1bf5b811 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-clk-ccf.dtsi
@@ -95,6 +95,10 @@
clocks = <&zynqmp_clk GDMA_REF>, <&zynqmp_clk LPD_LSBUS>;
};
+&gpu {
+ clocks = <&zynqmp_clk GPU_REF>, <&zynqmp_clk GPU_PP0_REF>;
+};
+
&lpd_dma_chan1 {
clocks = <&zynqmp_clk ADMA_REF>, <&zynqmp_clk LPD_LSBUS>;
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-sm-k26-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-sm-k26-revA.dts
index 20e83ca47b5d..34412304d09f 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-sm-k26-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-sm-k26-revA.dts
@@ -287,3 +287,7 @@
"", "", "", "", "", /* 165 - 169 */
"", "", "", ""; /* 170 - 173 */
};
+
+&gpu {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts
index b05be2552826..f89ef2afcd9e 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm015-dc1.dts
@@ -108,6 +108,9 @@
pinctrl-0 = <&pinctrl_gpio_default>;
};
+&gpu {
+ status = "okay";
+};
&i2c1 {
status = "okay";
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts
index 05a2b79738af..6e0106bf1294 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm018-dc4.dts
@@ -152,6 +152,10 @@
status = "okay";
};
+&gpu {
+ status = "okay";
+};
+
&i2c0 {
clock-frequency = <400000>;
status = "okay";
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
index 6948fd40554b..c74bc3ff703b 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
@@ -161,6 +161,10 @@
"", "", "", "";
};
+&gpu {
+ status = "okay";
+};
+
&i2c1 {
status = "okay";
pinctrl-names = "default", "gpio";
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
index 5fd6b70a154a..13c43324f1d2 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
@@ -216,6 +216,10 @@
pinctrl-0 = <&pinctrl_gpio_default>;
};
+&gpu {
+ status = "okay";
+};
+
&i2c0 {
status = "okay";
clock-frequency = <400000>;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
index bd8f20f3223d..485585c491f4 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
@@ -122,6 +122,10 @@
status = "okay";
};
+&gpu {
+ status = "okay";
+};
+
&i2c1 {
status = "okay";
clock-frequency = <400000>;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts
index 96feaad30166..44ec9edd2452 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revC.dts
@@ -127,6 +127,10 @@
status = "okay";
};
+&gpu {
+ status = "okay";
+};
+
&i2c1 {
status = "okay";
clock-frequency = <400000>;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
index 24a252317150..09773b7200f8 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
@@ -227,6 +227,10 @@
pinctrl-0 = <&pinctrl_gpio_default>;
};
+&gpu {
+ status = "okay";
+};
+
&i2c0 {
status = "okay";
clock-frequency = <400000>;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
index d685d8fbc36a..e0305dcbb010 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
@@ -187,6 +187,10 @@
pinctrl-0 = <&pinctrl_gpio_default>;
};
+&gpu {
+ status = "okay";
+};
+
&i2c0 {
status = "okay";
clock-frequency = <400000>;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 153db59dc4b3..bb0d0be30aa0 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -411,6 +411,18 @@
interrupts = <1 9 0xf04>;
};
+ gpu: gpu@fd4b0000 {
+ status = "disabled";
+ compatible = "xlnx,zynqmp-mali", "arm,mali-400";
+ reg = <0x0 0xfd4b0000 0x0 0x10000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 132 4>, <0 132 4>, <0 132 4>,
+ <0 132 4>, <0 132 4>, <0 132 4>;
+ interrupt-names = "gp", "gpmmu", "pp0", "ppmmu0", "pp1", "ppmmu1";
+ clock-names = "bus", "core";
+ power-domains = <&zynqmp_firmware PD_GPU>;
+ };
+
/* LPDDMA default allows only secured access. In order to enable
* these DMA channels, users should ensure that these DMA
* channels are allowed for non-secure access.
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 4cd6762bda80..dc3c072e862f 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -209,6 +209,7 @@ struct kvm_pgtable_visit_ctx {
kvm_pte_t old;
void *arg;
struct kvm_pgtable_mm_ops *mm_ops;
+ u64 start;
u64 addr;
u64 end;
u32 level;
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 0bd18de9fd97..932190079fa2 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -180,13 +180,18 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
return pmd;
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_kernel(pte_t pte)
{
pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
return pte;
}
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ return pte_mkwrite_kernel(pte);
+}
+
static inline pte_t pte_mkclean(pte_t pte)
{
pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
@@ -487,7 +492,7 @@ static inline int pmd_trans_huge(pmd_t pmd)
#define pmd_cont(pmd) pte_cont(pmd_pte(pmd))
#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
-#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd, vma) pte_pmd(pte_mkwrite(pmd_pte(pmd), (vma)))
#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
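
The vma argument threads through to callers so that architectures with shadow stack (x86, later in this series) can choose the PTE encoding; arm64 simply ignores it. A minimal sketch of the caller-side pattern, with an illustrative helper name that is not part of this diff:

	/* Sketch: fault path marking an entry writable via the vma-aware helper. */
	static pte_t make_writable_entry(pte_t entry, struct vm_area_struct *vma)
	{
		entry = pte_mkdirty(entry);
		if (vma->vm_flags & VM_WRITE)
			entry = pte_mkwrite(entry, vma); /* arch may pick a shadow stack encoding */
		return entry;
	}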
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 2cfc810d0a5b..06d31731c8ed 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -1343,7 +1343,7 @@ void __init minsigstksz_setup(void)
*/
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
-static_assert(NSIGSEGV == 9);
+static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 4700f8522d27..bbd542704730 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -460,7 +460,7 @@ void compat_setup_restart_syscall(struct pt_regs *regs)
*/
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
-static_assert(NSIGSEGV == 9);
+static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 1279949599b5..4c9dcd8fc939 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -81,26 +81,34 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
fpsimd_kvm_prepare();
+ /*
+ * We will check TIF_FOREIGN_FPSTATE just before entering the
+ * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
+ * FP_STATE_FREE if the flag is set.
+ */
vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
vcpu_set_flag(vcpu, HOST_SVE_ENABLED);
- /*
- * We don't currently support SME guests but if we leave
- * things in streaming mode then when the guest starts running
- * FPSIMD or SVE code it may generate SME traps so as a
- * special case if we are in streaming mode we force the host
- * state to be saved now and exit streaming mode so that we
- * don't have to handle any SME traps for valid guest
- * operations. Do this for ZA as well for now for simplicity.
- */
if (system_supports_sme()) {
vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
vcpu_set_flag(vcpu, HOST_SME_ENABLED);
+ /*
+ * If PSTATE.SM is enabled then save any pending FP
+ * state and disable PSTATE.SM. If we leave PSTATE.SM
+ * enabled and the guest does not enable SME via
+ * CPACR_EL1.SMEN then operations that should be valid
+ * may generate SME traps from EL1 to EL1 which we
+ * can't intercept and which would confuse the guest.
+ *
+ * Do the same for PSTATE.ZA in the case where there
+ * is state in the registers which has not already
+ * been saved; this is very unlikely to happen.
+ */
if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
vcpu->arch.fp_state = FP_STATE_FREE;
fpsimd_save_and_flush_cpu_state();
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index c41166f1a1dd..e78a08a72a3c 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -177,9 +177,17 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
sve_guest = vcpu_has_sve(vcpu);
esr_ec = kvm_vcpu_trap_get_class(vcpu);
- /* Don't handle SVE traps for non-SVE vcpus here: */
- if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
+ /* Only handle traps the vCPU can support here: */
+ switch (esr_ec) {
+ case ESR_ELx_EC_FP_ASIMD:
+ break;
+ case ESR_ELx_EC_SVE:
+ if (!sve_guest)
+ return false;
+ break;
+ default:
return false;
+ }
/* Valid trap. Switch the context: */
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 3d61bd3e591d..5282cb9ca4cf 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -58,8 +58,9 @@
struct kvm_pgtable_walk_data {
struct kvm_pgtable_walker *walker;
+ const u64 start;
u64 addr;
- u64 end;
+ const u64 end;
};
static bool kvm_phys_is_valid(u64 phys)
@@ -201,6 +202,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
.old = READ_ONCE(*ptep),
.arg = data->walker->arg,
.mm_ops = mm_ops,
+ .start = data->start,
.addr = data->addr,
.end = data->end,
.level = level,
@@ -293,6 +295,7 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
struct kvm_pgtable_walker *walker)
{
struct kvm_pgtable_walk_data walk_data = {
+ .start = ALIGN_DOWN(addr, PAGE_SIZE),
.addr = ALIGN_DOWN(addr, PAGE_SIZE),
.end = PAGE_ALIGN(walk_data.addr + size),
.walker = walker,
@@ -349,7 +352,7 @@ int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
}
struct hyp_map_data {
- u64 phys;
+ const u64 phys;
kvm_pte_t attr;
};
@@ -407,13 +410,12 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
struct hyp_map_data *data)
{
+ u64 phys = data->phys + (ctx->addr - ctx->start);
kvm_pte_t new;
- u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
if (!kvm_block_mapping_supported(ctx, phys))
return false;
- data->phys += granule;
new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
if (ctx->old == new)
return true;
@@ -576,7 +578,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
}
struct stage2_map_data {
- u64 phys;
+ const u64 phys;
kvm_pte_t attr;
u8 owner_id;
@@ -794,20 +796,43 @@ static bool stage2_pte_executable(kvm_pte_t pte)
return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
}
+static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
+ const struct stage2_map_data *data)
+{
+ u64 phys = data->phys;
+
+ /*
+ * Stage-2 walks to update ownership data are communicated to the map
+ * walker using an invalid PA. Avoid offsetting an already invalid PA,
+ * which could overflow and make the address valid again.
+ */
+ if (!kvm_phys_is_valid(phys))
+ return phys;
+
+ /*
+ * Otherwise, work out the correct PA based on how far the walk has
+ * gotten.
+ */
+ return phys + (ctx->addr - ctx->start);
+}
+
static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
struct stage2_map_data *data)
{
+ u64 phys = stage2_map_walker_phys_addr(ctx, data);
+
if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1)))
return false;
- return kvm_block_mapping_supported(ctx, data->phys);
+ return kvm_block_mapping_supported(ctx, phys);
}
static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
struct stage2_map_data *data)
{
kvm_pte_t new;
- u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
+ u64 phys = stage2_map_walker_phys_addr(ctx, data);
+ u64 granule = kvm_granule_size(ctx->level);
struct kvm_pgtable *pgt = data->mmu->pgt;
struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
@@ -841,8 +866,6 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
stage2_make_pte(ctx, new);
- if (kvm_phys_is_valid(phys))
- data->phys += granule;
return 0;
}
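
With phys now const and ctx->start recorded, the walkers derive the output address purely from walk progress instead of mutating data->phys per visited leaf. A sketch of the invariant, assuming page-aligned inputs:

	/* Sketch: the PA of any visited IPA is a pure function of walk progress. */
	static u64 walk_phys(u64 phys_base, u64 walk_start, u64 cur_addr)
	{
		/* e.g. base 0x80000000, start 0x40000000, cur 0x40201000
		 * yields 0x80201000 no matter how many leaves were installed
		 * or retried along the way.
		 */
		return phys_base + (cur_addr - walk_start);
	}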
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 64c3aec0d937..0bd93a5f21ce 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -204,7 +204,7 @@ void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
* Size Fault at level 0, as if exceeding PARange.
*
* Non-LPAE guests will only get the external abort, as there
- * is no way to to describe the ASF.
+ * is no way to describe the ASF.
*/
if (vcpu_el1_is_32bit(vcpu) &&
!(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c
index 08978d0672e7..7fe8ba1a2851 100644
--- a/arch/arm64/kvm/vmid.c
+++ b/arch/arm64/kvm/vmid.c
@@ -47,7 +47,7 @@ static void flush_context(void)
int cpu;
u64 vmid;
- bitmap_clear(vmid_map, 0, NUM_USER_VMIDS);
+ bitmap_zero(vmid_map, NUM_USER_VMIDS);
for_each_possible_cpu(cpu) {
vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);
@@ -182,8 +182,7 @@ int __init kvm_arm_vmid_alloc_init(void)
*/
WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus());
atomic64_set(&vmid_generation, VMID_FIRST_VERSION);
- vmid_map = kcalloc(BITS_TO_LONGS(NUM_USER_VMIDS),
- sizeof(*vmid_map), GFP_KERNEL);
+ vmid_map = bitmap_zalloc(NUM_USER_VMIDS, GFP_KERNEL);
if (!vmid_map)
return -ENOMEM;
@@ -192,5 +191,5 @@ int __init kvm_arm_vmid_alloc_init(void)
void __init kvm_arm_vmid_alloc_free(void)
{
- kfree(vmid_map);
+ bitmap_free(vmid_map);
}
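
The conversion swaps open-coded sizing for the dedicated bitmap allocators, which keep the bit count in one place. A minimal sketch of the pattern outside the VMID specifics:

	/* Sketch: allocate, clear, and free a bitmap by bit count. */
	static int bitmap_pattern(unsigned int nbits)
	{
		unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL); /* zeroed */

		if (!map)
			return -ENOMEM;
		bitmap_zero(map, nbits);	/* full-width clear, as in flush_context() */
		bitmap_free(map);		/* pairs with bitmap_zalloc() */
		return 0;
	}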
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
index 4ea2eefbc053..5c07e68d80ea 100644
--- a/arch/arm64/mm/trans_pgd.c
+++ b/arch/arm64/mm/trans_pgd.c
@@ -40,7 +40,7 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
* read only (code, rodata). Clear the RDONLY bit from
* the temporary mappings we use during restore.
*/
- set_pte(dst_ptep, pte_mkwrite(pte));
+ set_pte(dst_ptep, pte_mkwrite_kernel(pte));
} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
/*
* debug_pagealloc will removed the PTE_VALID bit if
@@ -53,7 +53,7 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
*/
BUG_ON(!pfn_valid(pte_pfn(pte)));
- set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
+ set_pte(dst_ptep, pte_mkpresent(pte_mkwrite_kernel(pte)));
}
}
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index d4042495febc..c2f92c991e37 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -176,7 +176,7 @@ static inline pte_t pte_mkold(pte_t pte)
return pte;
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
pte_val(pte) |= _PAGE_WRITE;
if (pte_val(pte) & _PAGE_MODIFIED)
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index 59393613d086..14ab9c789c0e 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -300,7 +300,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
}
/* pte_mkwrite - mark page as writable */
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
pte_val(pte) |= _PAGE_WRITE;
return pte;
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 21c97e31a28a..f879dd626da6 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -268,7 +268,7 @@ ia64_phys_addr_valid (unsigned long addr)
* access rights:
*/
#define pte_wrprotect(pte) (__pte(pte_val(pte) & ~_PAGE_AR_RW))
-#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_AR_RW))
+#define pte_mkwrite(pte, vma) (__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkold(pte) (__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D))
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index d28fb9dbec59..ebf645f40298 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -390,7 +390,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
return pte;
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
pte_val(pte) |= _PAGE_WRITE;
if (pte_val(pte) & _PAGE_MODIFIED)
@@ -490,7 +490,7 @@ static inline int pmd_write(pmd_t pmd)
return !!(pmd_val(pmd) & _PAGE_WRITE);
}
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
+static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
pmd_val(pmd) |= _PAGE_WRITE;
if (pmd_val(pmd) & _PAGE_MODIFIED)
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
index d97fbb812f63..9abb78acc237 100644
--- a/arch/m68k/include/asm/mcf_pgtable.h
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -211,7 +211,7 @@ static inline pte_t pte_mkold(pte_t pte)
return pte;
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
pte_val(pte) |= CF_PAGE_WRITABLE;
return pte;
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index ec0dc19ab834..c4e8eb76286d 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -155,7 +155,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;
static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_RONLY; return pte; }
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_RONLY; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mknocache(pte_t pte)
@@ -168,6 +167,11 @@ static inline pte_t pte_mkcache(pte_t pte)
pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_supervisor_cachemode;
return pte;
}
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ pte_val(pte) &= ~_PAGE_RONLY;
+ return pte;
+}
#define swapper_pg_dir kernel_pg_dir
extern pgd_t kernel_pg_dir[128];
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index e582b0484a55..2a06bea51a1e 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -143,10 +143,14 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & SUN3_PAGE_ACCESS
static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_WRITEABLE; return pte; }
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_MODIFIED; return pte; }
static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~SUN3_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= SUN3_PAGE_WRITEABLE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= SUN3_PAGE_MODIFIED; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= SUN3_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mknocache(pte_t pte) { pte_val(pte) |= SUN3_PAGE_NOCACHE; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ pte_val(pte) |= SUN3_PAGE_WRITEABLE;
+ return pte;
+}
// use this version when caches work...
//static inline pte_t pte_mkcache(pte_t pte) { pte_val(pte) &= SUN3_PAGE_NOCACHE; return pte; }
// until then, use:
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index d1b8272abcd9..5b83e82f8d7e 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -266,7 +266,7 @@ static inline pte_t pte_mkread(pte_t pte) \
{ pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) \
{ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) \
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma) \
{ pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) \
{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 574fa14ac8b2..a05cb342c80d 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -309,7 +309,7 @@ static inline pte_t pte_mkold(pte_t pte)
return pte;
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
pte.pte_low |= _PAGE_WRITE;
if (pte.pte_low & _PAGE_MODIFIED) {
@@ -364,7 +364,7 @@ static inline pte_t pte_mkold(pte_t pte)
return pte;
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
pte_val(pte) |= _PAGE_WRITE;
if (pte_val(pte) & _PAGE_MODIFIED)
@@ -627,7 +627,7 @@ static inline pmd_t pmd_wrprotect(pmd_t pmd)
return pmd;
}
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
+static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
pmd_val(pmd) |= _PAGE_WRITE;
if (pmd_val(pmd) & _PAGE_MODIFIED)
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index 0f5c2564e9f5..edd458518e0e 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -129,7 +129,7 @@ static inline pte_t pte_mkold(pte_t pte)
return pte;
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
pte_val(pte) |= _PAGE_WRITE;
return pte;
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 3eb9b9555d0d..fd40aec189d1 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -250,7 +250,7 @@ static inline pte_t pte_mkold(pte_t pte)
return pte;
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
pte_val(pte) |= _PAGE_WRITE;
return pte;
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index e2950f5db7c9..89f62137e67f 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -331,8 +331,12 @@ static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; retu
static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ pte_val(pte) |= _PAGE_WRITE;
+ return pte;
+}
/*
* Huge pte definitions.
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 7bf1fe7297c6..10d9a1d2aca9 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -498,7 +498,7 @@ static inline pte_t pte_mkpte(pte_t pte)
return pte;
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
return __pte(pte_val(pte) | _PAGE_RW);
}
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 4acc9690f599..be0636522d36 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -600,7 +600,7 @@ static inline pte_t pte_mkexec(pte_t pte)
return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_EXEC));
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
/*
* write implies read, hence set both
@@ -1071,7 +1071,7 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd)
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd, vma) pte_pmd(pte_mkwrite(pmd_pte(pmd), (vma)))
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pmd_soft_dirty(pmd) pte_soft_dirty(pmd_pte(pmd))
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index fec56d965f00..7bfbcb9ba55b 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -171,7 +171,7 @@ void unmap_kernel_page(unsigned long va);
do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
#ifndef pte_mkwrite
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
return __pte(pte_val(pte) | _PAGE_RW);
}
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index 1a89ebdc3acc..f32450eb270a 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -101,7 +101,7 @@ static inline int pte_write(pte_t pte)
#define pte_write pte_write
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
return __pte(pte_val(pte) & ~_PAGE_RO);
}
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 287e25864ffa..589009555877 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -85,7 +85,7 @@
#ifndef __ASSEMBLY__
/* pte_clear moved to later in this file */
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
return __pte(pte_val(pte) | _PAGE_RW);
}
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index 85bdd7d3652f..48e0eaf1ad61 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -93,11 +93,12 @@ static int process_ISA_OF_ranges(struct device_node *isa_node,
}
inval_range:
- if (!phb_io_base_phys) {
+ if (phb_io_base_phys) {
pr_err("no ISA IO ranges or unexpected isa range, mapping 64k\n");
remap_isa_base(phb_io_base_phys, 0x10000);
+ return 0;
}
- return 0;
+ return -EINVAL;
}
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
index 828d0f4106d2..cba6dd15de3b 100644
--- a/arch/powerpc/kernel/tau_6xx.c
+++ b/arch/powerpc/kernel/tau_6xx.c
@@ -200,7 +200,7 @@ static int __init TAU_init(void)
tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) &&
!strcmp(cur_cpu_spec->platform, "ppc750");
- tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1);
+ tau_workq = alloc_ordered_workqueue("tau", 0);
if (!tau_workq)
return -ENOMEM;
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 8bfafc9d2bf7..67467cd6f34c 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-config PPC_86xx
menuconfig PPC_86xx
bool "86xx-based boards"
depends on PPC_BOOK3S_32
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 0d9b7609c7d5..3e2e252016f7 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -265,6 +265,7 @@ config CPM2
config FSL_ULI1575
bool "ULI1575 PCIe south bridge support"
depends on FSL_SOC_BOOKE || PPC_86xx
+ depends on PCI
select FSL_PCI
select GENERIC_ISA_DMA
help
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 7bd0b563e163..dea6f0f25897 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -326,12 +326,6 @@ spu_irq_class_1(int irq, void *data)
if (stat & CLASS1_STORAGE_FAULT_INTR)
__spu_trap_data_map(spu, dar, dsisr);
- if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
- ;
-
- if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
- ;
-
spu->class_1_dsisr = 0;
spu->class_1_dar = 0;
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 719c97a155ed..47f8eabd1bee 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -564,8 +564,7 @@ int __init dlpar_workqueue_init(void)
if (pseries_hp_wq)
return 0;
- pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
- WQ_UNBOUND, 1);
+ pseries_hp_wq = alloc_ordered_workqueue("pseries hotplug workqueue", 0);
return pseries_hp_wq ? 0 : -ENOMEM;
}
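
Both workqueue conversions follow the same rule: an unbound queue with max_active == 1 is really asking for ordered execution, and alloc_ordered_workqueue() states that contract explicitly. A sketch of the before/after with a placeholder queue name:

	/* Before: ordering only implied by WQ_UNBOUND plus max_active == 1. */
	wq = alloc_workqueue("example", WQ_UNBOUND, 1);

	/* After: one item at a time, in queueing order, by definition. */
	wq = alloc_ordered_workqueue("example", 0);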
diff --git a/arch/riscv/boot/dts/starfive/jh7110.dtsi b/arch/riscv/boot/dts/starfive/jh7110.dtsi
index 4c5fdb905da8..30e1f34d5cf8 100644
--- a/arch/riscv/boot/dts/starfive/jh7110.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110.dtsi
@@ -496,5 +496,12 @@
gpio-controller;
#gpio-cells = <2>;
};
+
+ pwrc: power-controller@17030000 {
+ compatible = "starfive,jh7110-pmu";
+ reg = <0x0 0x17030000 0x0 0x10000>;
+ interrupts = <111>;
+ #power-domain-cells = <1>;
+ };
};
};
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 2258b27173b0..d9f63965cb27 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -379,7 +379,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
/* static inline pte_t pte_mkread(pte_t pte) */
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
return __pte(pte_val(pte) | _PAGE_WRITE);
}
@@ -665,9 +665,9 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
+static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
- return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
+ return pte_pmd(pte_mkwrite(pmd_pte(pmd), vma));
}
static inline pmd_t pmd_wrprotect(pmd_t pmd)
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index ccdbccfde148..558f7eef9c4d 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -102,9 +102,9 @@ static inline int huge_pte_dirty(pte_t pte)
return pte_dirty(pte);
}
-static inline pte_t huge_pte_mkwrite(pte_t pte)
+static inline pte_t huge_pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
- return pte_mkwrite(pte);
+ return pte_mkwrite(pte, vma);
}
static inline pte_t huge_pte_mkdirty(pte_t pte)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 6822a11c2c8a..68d51ca1ad55 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1005,7 +1005,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_kernel(pte_t pte)
{
pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
if (pte_val(pte) & _PAGE_DIRTY)
@@ -1013,6 +1013,11 @@ static inline pte_t pte_mkwrite(pte_t pte)
return pte;
}
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ return pte_mkwrite_kernel(pte);
+}
+
static inline pte_t pte_mkclean(pte_t pte)
{
pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY));
@@ -1488,7 +1493,7 @@ static inline pmd_t pmd_wrprotect(pmd_t pmd)
return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
}
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
+static inline pmd_t pmd_mkwrite_kernel(pmd_t pmd)
{
pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
@@ -1496,6 +1501,11 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
return pmd;
}
+static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+{
+ return pmd_mkwrite_kernel(pmd);
+}
+
static inline pmd_t pmd_mkclean(pmd_t pmd)
{
pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY));
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 5ba3bd8a7b12..86bacde6e3aa 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -97,7 +97,7 @@ static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
if (flags & SET_MEMORY_RO)
new = pte_wrprotect(new);
else if (flags & SET_MEMORY_RW)
- new = pte_mkwrite(pte_mkdirty(new));
+ new = pte_mkwrite_kernel(pte_mkdirty(new));
if (flags & SET_MEMORY_NX)
new = set_pte_bit(new, __pgprot(_PAGE_NOEXEC));
else if (flags & SET_MEMORY_X)
@@ -155,7 +155,7 @@ static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
if (flags & SET_MEMORY_RO)
new = pmd_wrprotect(new);
else if (flags & SET_MEMORY_RW)
- new = pmd_mkwrite(pmd_mkdirty(new));
+ new = pmd_mkwrite_kernel(pmd_mkdirty(new));
if (flags & SET_MEMORY_NX)
new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
else if (flags & SET_MEMORY_X)
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index 21952b094650..9f2dcb9eafc8 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -351,6 +351,12 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
#define PTE_BIT_FUNC(h,fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
+#define PTE_BIT_FUNC_VMA(h,fn,op) \
+static inline pte_t pte_##fn(pte_t pte, struct vm_area_struct *vma) \
+{ \
+ pte.pte_##h op; \
+ return pte; \
+}
#ifdef CONFIG_X2TLB
/*
@@ -359,11 +365,11 @@ static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }
* kernel permissions), we attempt to couple them a bit more sanely here.
*/
PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE));
-PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
+PTE_BIT_FUNC_VMA(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
#else
PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW);
-PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW);
+PTE_BIT_FUNC_VMA(low, mkwrite, |= _PAGE_RW);
PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE);
#endif
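
For reference, PTE_BIT_FUNC_VMA() generates the same body as PTE_BIT_FUNC() plus the (ignored) vma parameter, so PTE_BIT_FUNC_VMA(low, mkwrite, |= _PAGE_RW) expands to roughly:

	static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
	{
		pte.pte_low |= _PAGE_RW;	/* vma is accepted but unused on sh */
		return pte;
	}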
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index d342ea08843f..70a07f4f2142 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -21,7 +21,7 @@ static int __init scan_cache(unsigned long node, const char *uname,
if (!of_flat_dt_is_compatible(node, "jcore,cache"))
return 0;
- j2_ccr_base = (u32 __iomem *)of_flat_dt_translate_address(node);
+ j2_ccr_base = ioremap(of_flat_dt_translate_address(node), 4);
return 1;
}
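
The probe fix replaces a raw cast of the DT-translated physical address with a proper MMIO mapping; device registers must be mapped into the kernel virtual address space before being dereferenced. The general shape, sketched:

	/* Sketch: map a 32-bit device register before touching it. */
	u64 pa = of_flat_dt_translate_address(node);
	u32 __iomem *regs = ioremap(pa, 4);	/* 4 bytes: one 32-bit register */

	if (regs)
		writel(val, regs);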
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index d4330e3c57a6..3e8836179456 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -241,7 +241,7 @@ static inline pte_t pte_mkold(pte_t pte)
return __pte(pte_val(pte) & ~SRMMU_REF);
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
return __pte(pte_val(pte) | SRMMU_WRITE);
}
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 5563efa1a19f..e2723bf39658 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -517,7 +517,7 @@ static inline pte_t pte_mkclean(pte_t pte)
return __pte(val);
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
unsigned long val = pte_val(pte), mask;
@@ -772,11 +772,11 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
return __pmd(pte_val(pte));
}
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
+static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
pte_t pte = __pte(pmd_val(pmd));
- pte = pte_mkwrite(pte);
+ pte = pte_mkwrite(pte, vma);
return __pmd(pte_val(pte));
}
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index dad38960d1a8..82da8a2d769d 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -751,7 +751,7 @@ out:
*/
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
-static_assert(NSIGSEGV == 9);
+static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 570e43e6fda5..b4e410976e0d 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -562,7 +562,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long
*/
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
-static_assert(NSIGSEGV == 9);
+static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index a70d1618eb35..963479c133b7 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -207,7 +207,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
return(pte);
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
if (unlikely(pte_get_bits(pte, _PAGE_RW)))
return pte;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 53bab123a8ee..ce460d6b4e25 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1852,6 +1852,11 @@ config CC_HAS_IBT
(CC_IS_CLANG && CLANG_VERSION >= 140000)) && \
$(as-instr,endbr64)
+config X86_CET
+ def_bool n
+ help
+ CET features configured (Shadow stack or IBT)
+
config X86_KERNEL_IBT
prompt "Indirect Branch Tracking"
def_bool y
@@ -1859,6 +1864,7 @@ config X86_KERNEL_IBT
# https://github.com/llvm/llvm-project/commit/9d7001eba9c4cb311e03cd8cdc231f9e579f2d0f
depends on !LD_IS_LLD || LLD_VERSION >= 140000
select OBJTOOL
+ select X86_CET
help
Build the kernel with support for Indirect Branch Tracking, a
hardware-supported coarse-grain forward-edge Control Flow Integrity
@@ -1952,6 +1958,24 @@ config X86_SGX
If unsure, say N.
+config X86_USER_SHADOW_STACK
+ bool "X86 userspace shadow stack"
+ depends on AS_WRUSS
+ depends on X86_64
+ select ARCH_USES_HIGH_VMA_FLAGS
+ select X86_CET
+ help
+ Shadow stack protection is a hardware feature that detects function
+ return address corruption. This helps mitigate ROP attacks.
+ Applications must be enabled to use it, and old userspace does not
+ get protection "for free".
+
+ CPUs supporting shadow stacks were first released in 2020.
+
+	  See Documentation/arch/x86/shstk.rst for more information.
+
+ If unsure, say N.
+
config EFI
bool "EFI runtime service support"
depends on ACPI
diff --git a/arch/x86/Kconfig.assembler b/arch/x86/Kconfig.assembler
index b88f784cb02e..8ad41da301e5 100644
--- a/arch/x86/Kconfig.assembler
+++ b/arch/x86/Kconfig.assembler
@@ -24,3 +24,8 @@ config AS_GFNI
def_bool $(as-instr,vgf2p8mulb %xmm0$(comma)%xmm1$(comma)%xmm2)
help
Supported by binutils >= 2.30 and LLVM integrated assembler
+
+config AS_WRUSS
+ def_bool $(as-instr,wrussq %rax$(comma)(%rbx))
+ help
+ Supported by binutils >= 2.31 and LLVM integrated assembler
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 320480a8db4f..bc0a3c941b35 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -455,3 +455,4 @@
448 i386 process_mrelease sys_process_mrelease
449 i386 futex_waitv sys_futex_waitv
450 i386 set_mempolicy_home_node sys_set_mempolicy_home_node
+451 i386 cachestat sys_cachestat
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index c84d12608cd2..38db4b1c291a 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -372,6 +372,8 @@
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 64 map_shadow_stack sys_map_shadow_stack
#
# Due to a historical design error, certain syscalls are numbered differently
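
As wired up above, map_shadow_stack is 64-bit only and sits at nr 452 in this table. A hedged userspace sketch; the flag name follows this series' proposed uapi and may differ in the final headers:

	/* Sketch: ask the kernel for a shadow stack with a restore token on top. */
	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef SHADOW_STACK_SET_TOKEN
	#define SHADOW_STACK_SET_TOKEN 0x1	/* assumption: matches this series */
	#endif

	static long alloc_shstk(unsigned long size)
	{
		return syscall(452 /* map_shadow_stack, per this table */,
			       0 /* kernel-chosen address */, size,
			       SHADOW_STACK_SET_TOKEN);
	}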
diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c
index 1ba5d3b99b16..85d38b9f3586 100644
--- a/arch/x86/hyperv/hv_vtl.c
+++ b/arch/x86/hyperv/hv_vtl.c
@@ -20,6 +20,8 @@ void __init hv_vtl_init_platform(void)
{
pr_info("Linux runs in Hyper-V Virtual Trust Level\n");
+ x86_platform.realmode_reserve = x86_init_noop;
+ x86_platform.realmode_init = x86_init_noop;
x86_init.irqs.pre_vector_init = x86_init_noop;
x86_init.timers.timer_init = x86_init_noop;
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index cb8ca46213be..d7215c8b7923 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -308,6 +308,7 @@
#define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
#define X86_FEATURE_SMBA (11*32+21) /* "" Slow Memory Bandwidth Allocation */
#define X86_FEATURE_BMEC (11*32+22) /* "" Bandwidth Monitoring Event Configuration */
+#define X86_FEATURE_USER_SHSTK (11*32+23) /* Shadow stack support for user mode applications */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
@@ -380,6 +381,7 @@
#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
#define X86_FEATURE_WAITPKG (16*32+ 5) /* UMONITOR/UMWAIT/TPAUSE Instructions */
#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
+#define X86_FEATURE_SHSTK (16*32+ 7) /* "" Shadow stack */
#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */
#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */
#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index fafe9be7a6f4..702d93fdd10e 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -105,6 +105,18 @@
# define DISABLE_TDX_GUEST (1 << (X86_FEATURE_TDX_GUEST & 31))
#endif
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+#define DISABLE_USER_SHSTK 0
+#else
+#define DISABLE_USER_SHSTK (1 << (X86_FEATURE_USER_SHSTK & 31))
+#endif
+
+#ifdef CONFIG_X86_KERNEL_IBT
+#define DISABLE_IBT 0
+#else
+#define DISABLE_IBT (1 << (X86_FEATURE_IBT & 31))
+#endif
+
/*
* Make sure to add features to the correct mask
*/
@@ -120,7 +132,7 @@
#define DISABLED_MASK9 (DISABLE_SGX)
#define DISABLED_MASK10 0
#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET| \
- DISABLE_CALL_DEPTH_TRACKING)
+ DISABLE_CALL_DEPTH_TRACKING|DISABLE_USER_SHSTK)
#define DISABLED_MASK12 (DISABLE_LAM)
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0
@@ -128,7 +140,7 @@
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP| \
DISABLE_ENQCMD)
#define DISABLED_MASK17 0
-#define DISABLED_MASK18 0
+#define DISABLED_MASK18 (DISABLE_IBT)
#define DISABLED_MASK19 0
#define DISABLED_MASK20 0
#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
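
The new entries follow the standard disabled-features template: each DISABLE_* constant is either 0 or the feature's bit within its 32-bit word, letting cpu_feature_enabled() collapse to a compile-time false when the config is off. The shape, sketched with a hypothetical feature:

	/* Hypothetical feature FOO in some word N, same template as above. */
	#ifdef CONFIG_X86_FOO
	# define DISABLE_FOO	0				/* runtime CPUID check applies */
	#else
	# define DISABLE_FOO	(1 << (X86_FEATURE_FOO & 31))	/* statically disabled */
	#endif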
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index 503a577814b2..aadc6893dcaa 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -82,6 +82,15 @@ static inline void fpregs_unlock(void)
preempt_enable();
}
+/*
+ * FPU state gets lazily restored before returning to userspace. So when in the
+ * kernel, the valid FPU state may be kept in the buffer. This function will
+ * force-restore all the FPU state to the registers early if needed, and lock
+ * them from being automatically saved/restored. Then FPU state can be modified
+ * safely in the registers, before unlocking with fpregs_unlock().
+ */
+void fpregs_lock_and_load(void);
+
#ifdef CONFIG_X86_DEBUG_FPU
extern void fpregs_assert_state_consistent(void);
#else
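
The intended calling pattern for the new helper, sketched; the MSR write is an illustrative CET-user example in the spirit of this series' later code, not an excerpt:

	/* Sketch: modify FPU-managed register state in place. */
	fpregs_lock_and_load();			/* restore to registers, block preemption */
	wrmsrl(MSR_IA32_PL3_SSP, new_ssp);	/* illustrative: user shadow stack pointer */
	fpregs_unlock();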
diff --git a/arch/x86/include/asm/fpu/regset.h b/arch/x86/include/asm/fpu/regset.h
index 4f928d6a367b..697b77e96025 100644
--- a/arch/x86/include/asm/fpu/regset.h
+++ b/arch/x86/include/asm/fpu/regset.h
@@ -7,11 +7,12 @@
#include <linux/regset.h>
-extern user_regset_active_fn regset_fpregs_active, regset_xregset_fpregs_active;
+extern user_regset_active_fn regset_fpregs_active, regset_xregset_fpregs_active,
+ ssp_active;
extern user_regset_get2_fn fpregs_get, xfpregs_get, fpregs_soft_get,
- xstateregs_get;
+ xstateregs_get, ssp_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
- xstateregs_set;
+ xstateregs_set, ssp_set;
/*
* xstateregs_active == regset_fpregs_active. Please refer to the comment
diff --git a/arch/x86/include/asm/fpu/sched.h b/arch/x86/include/asm/fpu/sched.h
index c2d6cd78ed0c..3c2903bbb456 100644
--- a/arch/x86/include/asm/fpu/sched.h
+++ b/arch/x86/include/asm/fpu/sched.h
@@ -11,7 +11,8 @@
extern void save_fpregs_to_fpstate(struct fpu *fpu);
extern void fpu__drop(struct fpu *fpu);
-extern int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal);
+extern int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal,
+ unsigned long shstk_addr);
extern void fpu_flush_thread(void);
/*
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 7f6d858ff47a..eb810074f1e7 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -115,8 +115,8 @@ enum xfeature {
XFEATURE_PT_UNIMPLEMENTED_SO_FAR,
XFEATURE_PKRU,
XFEATURE_PASID,
- XFEATURE_RSRVD_COMP_11,
- XFEATURE_RSRVD_COMP_12,
+ XFEATURE_CET_USER,
+ XFEATURE_CET_KERNEL_UNUSED,
XFEATURE_RSRVD_COMP_13,
XFEATURE_RSRVD_COMP_14,
XFEATURE_LBR,
@@ -138,6 +138,8 @@ enum xfeature {
#define XFEATURE_MASK_PT (1 << XFEATURE_PT_UNIMPLEMENTED_SO_FAR)
#define XFEATURE_MASK_PKRU (1 << XFEATURE_PKRU)
#define XFEATURE_MASK_PASID (1 << XFEATURE_PASID)
+#define XFEATURE_MASK_CET_USER (1 << XFEATURE_CET_USER)
+#define XFEATURE_MASK_CET_KERNEL (1 << XFEATURE_CET_KERNEL_UNUSED)
#define XFEATURE_MASK_LBR (1 << XFEATURE_LBR)
#define XFEATURE_MASK_XTILE_CFG (1 << XFEATURE_XTILE_CFG)
#define XFEATURE_MASK_XTILE_DATA (1 << XFEATURE_XTILE_DATA)
@@ -253,6 +255,16 @@ struct pkru_state {
} __packed;
/*
+ * State component 11 is Control-flow Enforcement user states
+ */
+struct cet_user_state {
+ /* user control-flow settings */
+ u64 user_cet;
+ /* user shadow stack pointer */
+ u64 user_ssp;
+};
+
+/*
* State component 15: Architectural LBR configuration state.
* The size of Arch LBR state depends on the number of LBRs (lbr_depth).
*/
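
The two u64 fields mirror the XSAVES-managed user CET MSRs (MSR_IA32_U_CET and MSR_IA32_PL3_SSP), so component 11 occupies exactly 16 bytes. A layout sanity check in the style of the existing xstate checks, sketched:

	static void __used check_cet_user_layout(void)
	{
		BUILD_BUG_ON(sizeof(struct cet_user_state) != 16);	/* 2 x u64 */
		BUILD_BUG_ON(offsetof(struct cet_user_state, user_ssp) != 8);
	}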
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index cd3dd170e23a..d4427b88ee12 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -50,7 +50,8 @@
#define XFEATURE_MASK_USER_DYNAMIC XFEATURE_MASK_XTILE_DATA
/* All currently supported supervisor features */
-#define XFEATURE_MASK_SUPERVISOR_SUPPORTED (XFEATURE_MASK_PASID)
+#define XFEATURE_MASK_SUPERVISOR_SUPPORTED (XFEATURE_MASK_PASID | \
+ XFEATURE_MASK_CET_USER)
/*
* A supervisor state component may not always contain valuable information,
@@ -77,7 +78,8 @@
* Unsupported supervisor features. When a supervisor feature in this mask is
* supported in the future, move it to the supported supervisor feature mask.
*/
-#define XFEATURE_MASK_SUPERVISOR_UNSUPPORTED (XFEATURE_MASK_PT)
+#define XFEATURE_MASK_SUPERVISOR_UNSUPPORTED (XFEATURE_MASK_PT | \
+ XFEATURE_MASK_CET_KERNEL)
/* All supervisor states including supported and unsupported states. */
#define XFEATURE_MASK_SUPERVISOR_ALL (XFEATURE_MASK_SUPERVISOR_SUPPORTED | \
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index b241af4ce9b4..61e0e6301f09 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -614,7 +614,7 @@ DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_DF, xenpv_exc_double_fault);
#endif
/* #CP */
-#ifdef CONFIG_X86_KERNEL_IBT
+#ifdef CONFIG_X86_CET
DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_CP, exc_control_protection);
#endif
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 1d29dc791f5a..416901d406f8 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -186,6 +186,8 @@ do { \
#else
#define deactivate_mm(tsk, mm) \
do { \
+ if (!tsk->vfork_done) \
+ shstk_free(tsk); \
load_gs_index(0); \
loadsegment(fs, 0); \
} while (0)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 15ae4d6ba476..6f57f285d007 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -124,9 +124,17 @@ extern pmdval_t early_pmd_flags;
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
-static inline int pte_dirty(pte_t pte)
+static inline bool pte_dirty(pte_t pte)
{
- return pte_flags(pte) & _PAGE_DIRTY;
+ return pte_flags(pte) & _PAGE_DIRTY_BITS;
+}
+
+static inline bool pte_shstk(pte_t pte)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
+ return false;
+
+ return (pte_flags(pte) & (_PAGE_RW | _PAGE_DIRTY)) == _PAGE_DIRTY;
}
static inline int pte_young(pte_t pte)
@@ -134,9 +142,18 @@ static inline int pte_young(pte_t pte)
return pte_flags(pte) & _PAGE_ACCESSED;
}
-static inline int pmd_dirty(pmd_t pmd)
+static inline bool pmd_dirty(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_DIRTY_BITS;
+}
+
+static inline bool pmd_shstk(pmd_t pmd)
{
- return pmd_flags(pmd) & _PAGE_DIRTY;
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
+ return false;
+
+ return (pmd_flags(pmd) & (_PAGE_RW | _PAGE_DIRTY | _PAGE_PSE)) ==
+ (_PAGE_DIRTY | _PAGE_PSE);
}
#define pmd_young pmd_young
@@ -145,9 +162,9 @@ static inline int pmd_young(pmd_t pmd)
return pmd_flags(pmd) & _PAGE_ACCESSED;
}
-static inline int pud_dirty(pud_t pud)
+static inline bool pud_dirty(pud_t pud)
{
- return pud_flags(pud) & _PAGE_DIRTY;
+ return pud_flags(pud) & _PAGE_DIRTY_BITS;
}
static inline int pud_young(pud_t pud)
@@ -157,7 +174,27 @@ static inline int pud_young(pud_t pud)
static inline int pte_write(pte_t pte)
{
- return pte_flags(pte) & _PAGE_RW;
+ /*
+ * Shadow stack pages are logically writable, but do not have
+ * _PAGE_RW. Check for them separately from _PAGE_RW itself.
+ */
+ return (pte_flags(pte) & _PAGE_RW) || pte_shstk(pte);
+}
+
+#define pmd_write pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+ /*
+ * Shadow stack pages are logically writable, but do not have
+ * _PAGE_RW. Check for them separately from _PAGE_RW itself.
+ */
+ return (pmd_flags(pmd) & _PAGE_RW) || pmd_shstk(pmd);
+}
+
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+ return pud_flags(pud) & _PAGE_RW;
}
static inline int pte_huge(pte_t pte)
@@ -289,9 +326,57 @@ static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
return native_make_pte(v & ~clear);
}
+/*
+ * Write protection operations can result in Dirty=1,Write=0 PTEs. But in the
+ * case of X86_FEATURE_USER_SHSTK, the HW treats Dirty=1,Write=0 memory as
+ * shadow stack, so a software bit, _PAGE_BIT_SAVED_DIRTY, is used instead
+ * when creating dirty, write-protected memory. The following functions,
+ * pte_mksaveddirty() and pte_clear_saveddirty(), take a conventional dirty,
+ * write-protected PTE (Write=0,Dirty=1) and transition it to the shadow
+ * stack compatible version (Write=0,SavedDirty=1).
+ */
+static inline pte_t pte_mksaveddirty(pte_t pte)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
+ return pte;
+
+ pte = pte_clear_flags(pte, _PAGE_DIRTY);
+ return pte_set_flags(pte, _PAGE_SAVED_DIRTY);
+}
+
+static inline pte_t pte_clear_saveddirty(pte_t pte)
+{
+ /*
+ * _PAGE_SAVED_DIRTY is unnecessary on !X86_FEATURE_USER_SHSTK kernels,
+ * since the HW dirty bit can be used without creating shadow stack
+ * memory. See the _PAGE_SAVED_DIRTY definition for more details.
+ */
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
+ return pte;
+
+ /*
+	 * The PTE is getting copied on write, so it will be dirtied
+	 * if writable, or made shadow stack if it is a shadow stack
+	 * PTE being copied on access. Set the dirty bit for both
+	 * cases.
+ */
+ pte = pte_set_flags(pte, _PAGE_DIRTY);
+ return pte_clear_flags(pte, _PAGE_SAVED_DIRTY);
+}
+
static inline pte_t pte_wrprotect(pte_t pte)
{
- return pte_clear_flags(pte, _PAGE_RW);
+ pte = pte_clear_flags(pte, _PAGE_RW);
+
+ /*
+ * Blindly clearing _PAGE_RW might accidentally create
+ * a shadow stack PTE (Write=0,Dirty=1). Move the hardware
+ * dirty value to the software bit.
+ */
+ if (pte_dirty(pte))
+ pte = pte_mksaveddirty(pte);
+ return pte;
}
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
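
Taken together, wrprotect and mkwrite move the dirty bit between its hardware and software homes so that only a deliberate pte_mkwrite_shstk() yields the (Write=0,Dirty=1) encoding the HW treats as shadow stack. A sketch of the encoding test, assuming _PAGE_DIRTY_BITS is (_PAGE_DIRTY | _PAGE_SAVED_DIRTY) as defined elsewhere in this series:

	/* Sketch: the one PTE flag combination the HW reads as shadow stack. */
	static bool is_shstk_encoding(pteval_t flags)
	{
		return (flags & (_PAGE_RW | _PAGE_DIRTY)) == _PAGE_DIRTY;
	}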
@@ -329,7 +414,7 @@ static inline pte_t pte_clear_uffd_wp(pte_t pte)
static inline pte_t pte_mkclean(pte_t pte)
{
- return pte_clear_flags(pte, _PAGE_DIRTY);
+ return pte_clear_flags(pte, _PAGE_DIRTY_BITS);
}
static inline pte_t pte_mkold(pte_t pte)
@@ -344,7 +429,19 @@ static inline pte_t pte_mkexec(pte_t pte)
static inline pte_t pte_mkdirty(pte_t pte)
{
- return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
+ pteval_t dirty = _PAGE_DIRTY;
+
+ /* Avoid creating Dirty=1,Write=0 PTEs */
+ if (cpu_feature_enabled(X86_FEATURE_USER_SHSTK) && !pte_write(pte))
+ dirty = _PAGE_SAVED_DIRTY;
+
+ return pte_set_flags(pte, dirty | _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_mkwrite_shstk(pte_t pte)
+{
+ /* pte_clear_saveddirty() also sets Dirty=1 */
+ return pte_clear_saveddirty(pte);
}
static inline pte_t pte_mkyoung(pte_t pte)
@@ -352,11 +449,15 @@ static inline pte_t pte_mkyoung(pte_t pte)
return pte_set_flags(pte, _PAGE_ACCESSED);
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite_kernel(pte_t pte)
{
return pte_set_flags(pte, _PAGE_RW);
}
+struct vm_area_struct;
+
+pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma);
+
static inline pte_t pte_mkhuge(pte_t pte)
{
return pte_set_flags(pte, _PAGE_PSE);
@@ -401,9 +502,37 @@ static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
return native_make_pmd(v & ~clear);
}
+/* See comments above pte_mksaveddirty() */
+static inline pmd_t pmd_mksaveddirty(pmd_t pmd)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
+ return pmd;
+
+ pmd = pmd_clear_flags(pmd, _PAGE_DIRTY);
+ return pmd_set_flags(pmd, _PAGE_SAVED_DIRTY);
+}
+
+/* See comments above pte_mksaveddirty() */
+static inline pmd_t pmd_clear_saveddirty(pmd_t pmd)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
+ return pmd;
+
+ pmd = pmd_set_flags(pmd, _PAGE_DIRTY);
+ return pmd_clear_flags(pmd, _PAGE_SAVED_DIRTY);
+}
+
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
- return pmd_clear_flags(pmd, _PAGE_RW);
+ pmd = pmd_clear_flags(pmd, _PAGE_RW);
+ /*
+ * Blindly clearing _PAGE_RW might accidentally create
+ * a shadow stack PMD (Write=0,Dirty=1). Move the hardware
+ * dirty value to the software bit.
+ */
+ if (pmd_dirty(pmd))
+ pmd = pmd_mksaveddirty(pmd);
+ return pmd;
}
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
@@ -430,12 +559,23 @@ static inline pmd_t pmd_mkold(pmd_t pmd)
static inline pmd_t pmd_mkclean(pmd_t pmd)
{
- return pmd_clear_flags(pmd, _PAGE_DIRTY);
+ return pmd_clear_flags(pmd, _PAGE_DIRTY_BITS);
}
static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
- return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
+ pmdval_t dirty = _PAGE_DIRTY;
+
+ /* Avoid creating (HW)Dirty=1, Write=0 PMDs */
+ if (cpu_feature_enabled(X86_FEATURE_USER_SHSTK) && !pmd_write(pmd))
+ dirty = _PAGE_SAVED_DIRTY;
+
+ return pmd_set_flags(pmd, dirty | _PAGE_SOFT_DIRTY);
+}
+
+static inline pmd_t pmd_mkwrite_shstk(pmd_t pmd)
+{
+ return pmd_clear_saveddirty(pmd);
}
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
@@ -453,10 +593,7 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
return pmd_set_flags(pmd, _PAGE_ACCESSED);
}
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
-{
- return pmd_set_flags(pmd, _PAGE_RW);
-}
+pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
@@ -472,6 +609,26 @@ static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
return native_make_pud(v & ~clear);
}
+/* See comments above pte_mksaveddirty() */
+static inline pud_t pud_mksaveddirty(pud_t pud)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
+ return pud;
+
+ pud = pud_clear_flags(pud, _PAGE_DIRTY);
+ return pud_set_flags(pud, _PAGE_SAVED_DIRTY);
+}
+
+/* See comments above pte_mksaveddirty() */
+static inline pud_t pud_clear_saveddirty(pud_t pud)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
+ return pud;
+
+ pud = pud_set_flags(pud, _PAGE_DIRTY);
+ return pud_clear_flags(pud, _PAGE_SAVED_DIRTY);
+}
+
static inline pud_t pud_mkold(pud_t pud)
{
return pud_clear_flags(pud, _PAGE_ACCESSED);
@@ -479,17 +636,32 @@ static inline pud_t pud_mkold(pud_t pud)
static inline pud_t pud_mkclean(pud_t pud)
{
- return pud_clear_flags(pud, _PAGE_DIRTY);
+ return pud_clear_flags(pud, _PAGE_DIRTY_BITS);
}
static inline pud_t pud_wrprotect(pud_t pud)
{
- return pud_clear_flags(pud, _PAGE_RW);
+ pud = pud_clear_flags(pud, _PAGE_RW);
+
+ /*
+ * Blindly clearing _PAGE_RW might accidentally create
+ * a shadow stack PUD (Write=0,Dirty=1). Move the hardware
+ * dirty value to the software bit.
+ */
+ if (pud_dirty(pud))
+ pud = pud_mksaveddirty(pud);
+ return pud;
}
static inline pud_t pud_mkdirty(pud_t pud)
{
- return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
+ pudval_t dirty = _PAGE_DIRTY;
+
+ /* Avoid creating (HW)Dirty=1, Write=0 PUDs */
+ if (cpu_feature_enabled(X86_FEATURE_USER_SHSTK) && !pud_write(pud))
+ dirty = _PAGE_SAVED_DIRTY;
+
+ return pud_set_flags(pud, dirty | _PAGE_SOFT_DIRTY);
}
static inline pud_t pud_mkdevmap(pud_t pud)
@@ -509,7 +681,11 @@ static inline pud_t pud_mkyoung(pud_t pud)
static inline pud_t pud_mkwrite(pud_t pud)
{
- return pud_set_flags(pud, _PAGE_RW);
+ pud = pud_set_flags(pud, _PAGE_RW);
+
+ if (pud_dirty(pud))
+ pud = pud_clear_saveddirty(pud);
+ return pud;
}
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
@@ -626,6 +802,8 @@ static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pteval_t val = pte_val(pte), oldval = val;
+ bool wr_protected;
+ pte_t pte_result;
/*
* Chop off the NX bit (if present), and add the NX portion of
@@ -634,17 +812,43 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
val &= _PAGE_CHG_MASK;
val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
- return __pte(val);
+
+ pte_result = __pte(val);
+
+ /*
+ * Do the saveddirty fixup if the PTE was just write-protected and
+ * is dirty.
+ */
+ wr_protected = (oldval & _PAGE_RW) && !(val & _PAGE_RW);
+ if (cpu_feature_enabled(X86_FEATURE_USER_SHSTK) && wr_protected &&
+ (val & _PAGE_DIRTY))
+ pte_result = pte_mksaveddirty(pte_result);
+
+ return pte_result;
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
pmdval_t val = pmd_val(pmd), oldval = val;
+ bool wr_protected;
+ pmd_t pmd_result;
- val &= _HPAGE_CHG_MASK;
+ val &= (_HPAGE_CHG_MASK & ~_PAGE_DIRTY);
val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
- return __pmd(val);
+
+ pmd_result = __pmd(val);
+
+ /*
+ * Do the saveddirty fixup if the PMD was just write-protected and
+ * is dirty.
+ */
+ wr_protected = (oldval & _PAGE_RW) && !(val & _PAGE_RW);
+ if (cpu_feature_enabled(X86_FEATURE_USER_SHSTK) && wr_protected &&
+ (val & _PAGE_DIRTY))
+ pmd_result = pmd_mksaveddirty(pmd_result);
+
+ return pmd_result;
}
/*
@@ -828,7 +1032,15 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
* (Currently stuck as a macro because of indirect forward reference
* to linux/mm.h:page_to_nid())
*/
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+#define mk_pte(page, pgprot) \
+({ \
+ pgprot_t __pgprot = pgprot; \
+ \
+ WARN_ON_ONCE(cpu_feature_enabled(X86_FEATURE_USER_SHSTK) && \
+ (pgprot_val(__pgprot) & (_PAGE_DIRTY | _PAGE_RW)) == \
+ _PAGE_DIRTY); \
+ pfn_pte(page_to_pfn(page), __pgprot); \
+})
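A hedged sketch of what the new WARN_ON_ONCE() catches on shadow stack capable kernels; `page` is an assumed struct page * in scope and both call sites are hypothetical:

	/* Dirty=1 with Write=0 looks like shadow stack -- this would warn: */
	pte_t bad = mk_pte(page, __pgprot(_PAGE_PRESENT | _PAGE_DIRTY));

	/* A conventional writable, dirty mapping does not: */
	pte_t ok  = mk_pte(page, __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY));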
static inline int pmd_bad(pmd_t pmd)
{
@@ -1094,6 +1306,23 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
static inline void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+ /*
+ * Avoid accidentally creating shadow stack PTEs
+ * (Write=0,Dirty=1). Use cmpxchg() to prevent races with
+ * the hardware setting Dirty=1.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_USER_SHSTK)) {
+ pte_t old_pte, new_pte;
+
+ old_pte = READ_ONCE(*ptep);
+ do {
+ new_pte = pte_wrprotect(old_pte);
+ } while (!try_cmpxchg(&ptep->pte, &old_pte.pte, new_pte.pte));
+
+ return;
+ }
+#endif
clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}
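The race the try_cmpxchg() loop closes, sketched as one possible interleaving (illustration only, not from the patch):

	/*
	 *   CPU 0 (kernel)                    hardware page walk
	 *   old = *ptep  (Write=1,Dirty=0)
	 *                                     sets Dirty=1 (Write=1,Dirty=1)
	 *   clear Write  (Write=0,Dirty=1)  <-- accidental shadow stack PTE
	 *
	 * try_cmpxchg() detects the concurrent Dirty update, retries, and
	 * the retried pte_wrprotect() moves Dirty to SavedDirty together
	 * with clearing Write.
	 */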
@@ -1120,12 +1349,6 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
-#define pmd_write pmd_write
-static inline int pmd_write(pmd_t pmd)
-{
- return pmd_flags(pmd) & _PAGE_RW;
-}
-
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
@@ -1152,13 +1375,25 @@ static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
- clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
-}
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+ /*
+ * Avoid accidentally creating shadow stack PMDs
+ * (Write=0,Dirty=1). Use cmpxchg() to prevent races with
+ * the hardware setting Dirty=1.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_USER_SHSTK)) {
+ pmd_t old_pmd, new_pmd;
-#define pud_write pud_write
-static inline int pud_write(pud_t pud)
-{
- return pud_flags(pud) & _PAGE_RW;
+ old_pmd = READ_ONCE(*pmdp);
+ do {
+ new_pmd = pmd_wrprotect(old_pmd);
+ } while (!try_cmpxchg(&pmdp->pmd, &old_pmd.pmd, new_pmd.pmd));
+
+ return;
+ }
+#endif
+
+ clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}
#ifndef pmdp_establish
@@ -1411,6 +1646,11 @@ static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;
+ /*
+ * Write=0,Dirty=1 PTEs are shadow stack, which the kernel
+ * shouldn't generally allow access to, but since they
+ * are already Write=0, the below logic covers both cases.
+ */
if (write)
need_pte_bits |= _PAGE_RW;
@@ -1452,6 +1692,12 @@ static inline bool arch_has_hw_pte_young(void)
return true;
}
+#define arch_check_zapped_pte arch_check_zapped_pte
+void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte);
+
+#define arch_check_zapped_pmd arch_check_zapped_pmd
+void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd);
+
#ifdef CONFIG_XEN_PV
#define arch_has_hw_nonleaf_pmd_young arch_has_hw_nonleaf_pmd_young
static inline bool arch_has_hw_nonleaf_pmd_young(void)
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 447d4bee25c4..8f266788c0d7 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -21,7 +21,8 @@
#define _PAGE_BIT_SOFTW2 10 /* " */
#define _PAGE_BIT_SOFTW3 11 /* " */
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
-#define _PAGE_BIT_SOFTW4 58 /* available for programmer */
+#define _PAGE_BIT_SOFTW4 57 /* available for programmer */
+#define _PAGE_BIT_SOFTW5 58 /* available for programmer */
#define _PAGE_BIT_PKEY_BIT0 59 /* Protection Keys, bit 1/4 */
#define _PAGE_BIT_PKEY_BIT1 60 /* Protection Keys, bit 2/4 */
#define _PAGE_BIT_PKEY_BIT2 61 /* Protection Keys, bit 3/4 */
@@ -34,6 +35,15 @@
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4
+/*
+ * Indicates a Saved Dirty bit page.
+ */
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit */
+#else
+#define _PAGE_BIT_SAVED_DIRTY 0
+#endif
+
/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
@@ -117,6 +127,25 @@
#define _PAGE_SOFTW4 (_AT(pteval_t, 0))
#endif
+/*
+ * The hardware requires shadow stack to be Write=0,Dirty=1. However,
+ * there are valid cases where the kernel might create read-only PTEs that
+ * are dirty (e.g., fork(), mprotect(), uffd-wp(), soft-dirty tracking). In
+ * this case, the _PAGE_SAVED_DIRTY bit is used instead of the HW-dirty bit,
+ * to avoid creating a wrong "shadow stack" PTEs. Such PTEs have
+ * (Write=0,SavedDirty=1,Dirty=0) set.
+ *
+ * Note that on processors without shadow stack support, the
+ * _PAGE_SAVED_DIRTY bit remains unused.
+ */
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+#define _PAGE_SAVED_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SAVED_DIRTY)
+#else
+#define _PAGE_SAVED_DIRTY (_AT(pteval_t, 0))
+#endif
+
+#define _PAGE_DIRTY_BITS (_PAGE_DIRTY | _PAGE_SAVED_DIRTY)
+
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
/*
@@ -125,9 +154,9 @@
* instance, and is *not* included in this mask since
* pte_modify() does modify it.
*/
-#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
- _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
- _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC | \
+#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
+ _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY_BITS | \
+ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC | \
_PAGE_UFFD_WP)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
@@ -186,16 +215,21 @@ enum page_cache_mode {
#define PAGE_READONLY __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0)
#define PAGE_READONLY_EXEC __pg(__PP| 0|_USR|___A| 0| 0| 0| 0)
-#define __PAGE_KERNEL (__PP|__RW| 0|___A|__NX|___D| 0|___G)
-#define __PAGE_KERNEL_EXEC (__PP|__RW| 0|___A| 0|___D| 0|___G)
-#define _KERNPG_TABLE_NOENC (__PP|__RW| 0|___A| 0|___D| 0| 0)
-#define _KERNPG_TABLE (__PP|__RW| 0|___A| 0|___D| 0| 0| _ENC)
+/*
+ * Page tables need to have Write=1 in order for any lower PTEs to be
+ * writable. This includes shadow stack memory (Write=0, Dirty=1).
+ */
#define _PAGE_TABLE_NOENC (__PP|__RW|_USR|___A| 0|___D| 0| 0)
#define _PAGE_TABLE (__PP|__RW|_USR|___A| 0|___D| 0| 0| _ENC)
-#define __PAGE_KERNEL_RO (__PP| 0| 0|___A|__NX|___D| 0|___G)
-#define __PAGE_KERNEL_ROX (__PP| 0| 0|___A| 0|___D| 0|___G)
+#define _KERNPG_TABLE_NOENC (__PP|__RW| 0|___A| 0|___D| 0| 0)
+#define _KERNPG_TABLE (__PP|__RW| 0|___A| 0|___D| 0| 0| _ENC)
+
+#define __PAGE_KERNEL (__PP|__RW| 0|___A|__NX|___D| 0|___G)
+#define __PAGE_KERNEL_EXEC (__PP|__RW| 0|___A| 0|___D| 0|___G)
+#define __PAGE_KERNEL_RO (__PP| 0| 0|___A|__NX| 0| 0|___G)
+#define __PAGE_KERNEL_ROX (__PP| 0| 0|___A| 0| 0| 0|___G)
#define __PAGE_KERNEL_NOCACHE (__PP|__RW| 0|___A|__NX|___D| 0|___G| __NC)
-#define __PAGE_KERNEL_VVAR (__PP| 0|_USR|___A|__NX|___D| 0|___G)
+#define __PAGE_KERNEL_VVAR (__PP| 0|_USR|___A|__NX| 0| 0|___G)
#define __PAGE_KERNEL_LARGE (__PP|__RW| 0|___A|__NX|___D|_PSE|___G)
#define __PAGE_KERNEL_LARGE_EXEC (__PP|__RW| 0|___A| 0|___D|_PSE|___G)
#define __PAGE_KERNEL_WP (__PP|__RW| 0|___A|__NX|___D| 0|___G| __WP)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index a1e4fa58b357..2a5ec5750ba7 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -28,6 +28,7 @@ struct vm86;
#include <asm/unwind_hints.h>
#include <asm/vmxfeatures.h>
#include <asm/vdso/processor.h>
+#include <asm/shstk.h>
#include <linux/personality.h>
#include <linux/cache.h>
@@ -475,6 +476,13 @@ struct thread_struct {
*/
u32 pkru;
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+ unsigned long features;
+ unsigned long features_locked;
+
+ struct thread_shstk shstk;
+#endif
+
/* Floating point and extended processor state */
struct fpu fpu;
/*
diff --git a/arch/x86/include/asm/shstk.h b/arch/x86/include/asm/shstk.h
new file mode 100644
index 000000000000..42fee8959df7
--- /dev/null
+++ b/arch/x86/include/asm/shstk.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_SHSTK_H
+#define _ASM_X86_SHSTK_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+struct task_struct;
+struct ksignal;
+
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+struct thread_shstk {
+ u64 base;
+ u64 size;
+};
+
+long shstk_prctl(struct task_struct *task, int option, unsigned long arg2);
+void reset_thread_features(void);
+unsigned long shstk_alloc_thread_stack(struct task_struct *p, unsigned long clone_flags,
+ unsigned long stack_size);
+void shstk_free(struct task_struct *p);
+int setup_signal_shadow_stack(struct ksignal *ksig);
+int restore_signal_shadow_stack(void);
+#else
+static inline long shstk_prctl(struct task_struct *task, int option,
+ unsigned long arg2) { return -EINVAL; }
+static inline void reset_thread_features(void) {}
+static inline unsigned long shstk_alloc_thread_stack(struct task_struct *p,
+ unsigned long clone_flags,
+ unsigned long stack_size) { return 0; }
+static inline void shstk_free(struct task_struct *p) {}
+static inline int setup_signal_shadow_stack(struct ksignal *ksig) { return 0; }
+static inline int restore_signal_shadow_stack(void) { return 0; }
+#endif /* CONFIG_X86_USER_SHADOW_STACK */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_X86_SHSTK_H */
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index de48d1389936..d6cd9344f6c7 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -202,6 +202,19 @@ static inline void clwb(volatile void *__p)
: [pax] "a" (p));
}
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+static inline int write_user_shstk_64(u64 __user *addr, u64 val)
+{
+ asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
+ _ASM_EXTABLE(1b, %l[fail])
+ :: [addr] "r" (addr), [val] "r" (val)
+ :: fail);
+ return 0;
+fail:
+ return -EFAULT;
+}
+#endif /* CONFIG_X86_USER_SHADOW_STACK */
+
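A sketch of how the series uses this helper, mirroring create_rstor_token() in shstk.c further down; `ssp` is an assumed shadow stack pointer value:

	u64 __user *addr = (u64 __user *)(ssp - 8);

	/* WRUSS writes shadow stack memory from CPL 0; a #PF becomes -EFAULT */
	if (write_user_shstk_64(addr, (u64)ssp | BIT(0)))
		return -EFAULT;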
#define nop() asm volatile ("nop")
static inline void serialize(void)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 75bfaa421030..965659d2c965 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -293,7 +293,8 @@ static inline bool pte_flags_need_flush(unsigned long oldflags,
const pteval_t flush_on_clear = _PAGE_DIRTY | _PAGE_PRESENT |
_PAGE_ACCESSED;
const pteval_t software_flags = _PAGE_SOFTW1 | _PAGE_SOFTW2 |
- _PAGE_SOFTW3 | _PAGE_SOFTW4;
+ _PAGE_SOFTW3 | _PAGE_SOFTW4 |
+ _PAGE_SAVED_DIRTY;
const pteval_t flush_on_change = _PAGE_RW | _PAGE_USER | _PAGE_PWT |
_PAGE_PCD | _PAGE_PSE | _PAGE_GLOBAL | _PAGE_PAT |
_PAGE_PAT_LARGE | _PAGE_PKEY_BIT0 | _PAGE_PKEY_BIT1 |
diff --git a/arch/x86/include/asm/trap_pf.h b/arch/x86/include/asm/trap_pf.h
index 10b1de500ab1..afa524325e55 100644
--- a/arch/x86/include/asm/trap_pf.h
+++ b/arch/x86/include/asm/trap_pf.h
@@ -11,6 +11,7 @@
* bit 3 == 1: use of reserved bit detected
* bit 4 == 1: fault was an instruction fetch
* bit 5 == 1: protection keys block access
+ * bit 6 == 1: shadow stack access fault
* bit 15 == 1: SGX MMU page-fault
*/
enum x86_pf_error_code {
@@ -20,6 +21,7 @@ enum x86_pf_error_code {
X86_PF_RSVD = 1 << 3,
X86_PF_INSTR = 1 << 4,
X86_PF_PK = 1 << 5,
+ X86_PF_SHSTK = 1 << 6,
X86_PF_SGX = 1 << 15,
};
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 47ecfff2c83d..75e0dabf0c45 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -47,4 +47,16 @@ void __noreturn handle_stack_overflow(struct pt_regs *regs,
struct stack_info *info);
#endif
+static inline void cond_local_irq_enable(struct pt_regs *regs)
+{
+ if (regs->flags & X86_EFLAGS_IF)
+ local_irq_enable();
+}
+
+static inline void cond_local_irq_disable(struct pt_regs *regs)
+{
+ if (regs->flags & X86_EFLAGS_IF)
+ local_irq_disable();
+}
+
#endif /* _ASM_X86_TRAPS_H */
diff --git a/arch/x86/include/uapi/asm/mman.h b/arch/x86/include/uapi/asm/mman.h
index 775dbd3aff73..8148bdddbd2c 100644
--- a/arch/x86/include/uapi/asm/mman.h
+++ b/arch/x86/include/uapi/asm/mman.h
@@ -3,6 +3,7 @@
#define _ASM_X86_MMAN_H
#define MAP_32BIT 0x40 /* only give out 32bit addresses */
+#define MAP_ABOVE4G 0x80 /* only map above 4GB */
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
#define arch_calc_vm_prot_bits(prot, key) ( \
@@ -12,6 +13,9 @@
((key) & 0x8 ? VM_PKEY_BIT3 : 0))
#endif
+/* Flags for map_shadow_stack(2) */
+#define SHADOW_STACK_SET_TOKEN (1ULL << 0) /* Set up a restore token in the shadow stack */
+
#include <asm-generic/mman.h>
#endif /* _ASM_X86_MMAN_H */
diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h
index e8d7ebbca1a4..384e2cc6ac19 100644
--- a/arch/x86/include/uapi/asm/prctl.h
+++ b/arch/x86/include/uapi/asm/prctl.h
@@ -23,9 +23,21 @@
#define ARCH_MAP_VDSO_32 0x2002
#define ARCH_MAP_VDSO_64 0x2003
+/* Don't use 0x3001-0x3004 because of old glibcs */
+
#define ARCH_GET_UNTAG_MASK 0x4001
#define ARCH_ENABLE_TAGGED_ADDR 0x4002
#define ARCH_GET_MAX_TAG_BITS 0x4003
#define ARCH_FORCE_TAGGED_SVA 0x4004
+#define ARCH_SHSTK_ENABLE 0x5001
+#define ARCH_SHSTK_DISABLE 0x5002
+#define ARCH_SHSTK_LOCK 0x5003
+#define ARCH_SHSTK_UNLOCK 0x5004
+#define ARCH_SHSTK_STATUS 0x5005
+
+/* ARCH_SHSTK_ features bits */
+#define ARCH_SHSTK_SHSTK (1ULL << 0)
+#define ARCH_SHSTK_WRSS (1ULL << 1)
+
#endif /* _ASM_X86_PRCTL_H */
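A hedged userspace sketch of driving these prctls; glibc has no wrapper, so a raw syscall(2) is assumed, and in practice enabling is expected to be done by the dynamic loader rather than application code:

	#include <sys/syscall.h>
	#include <unistd.h>

	unsigned long features;

	syscall(SYS_arch_prctl, ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK);
	syscall(SYS_arch_prctl, ARCH_SHSTK_LOCK, ARCH_SHSTK_SHSTK);
	syscall(SYS_arch_prctl, ARCH_SHSTK_STATUS, &features);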
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index dd61752f4c96..b366641703e3 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -144,6 +144,10 @@ obj-$(CONFIG_CFI_CLANG) += cfi.o
obj-$(CONFIG_CALL_THUNKS) += callthunks.o
+obj-$(CONFIG_X86_CET) += cet.o
+
+obj-$(CONFIG_X86_USER_SHADOW_STACK) += shstk.o
+
###
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/cet.c b/arch/x86/kernel/cet.c
new file mode 100644
index 000000000000..cc10d8be9d74
--- /dev/null
+++ b/arch/x86/kernel/cet.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ptrace.h>
+#include <asm/bugs.h>
+#include <asm/traps.h>
+
+enum cp_error_code {
+ CP_EC = (1 << 15) - 1,
+
+ CP_RET = 1,
+ CP_IRET = 2,
+ CP_ENDBR = 3,
+ CP_RSTRORSSP = 4,
+ CP_SETSSBSY = 5,
+
+ CP_ENCL = 1 << 15,
+};
+
+static const char cp_err[][10] = {
+ [0] = "unknown",
+ [1] = "near ret",
+ [2] = "far/iret",
+ [3] = "endbranch",
+ [4] = "rstorssp",
+ [5] = "setssbsy",
+};
+
+static const char *cp_err_string(unsigned long error_code)
+{
+ unsigned int cpec = error_code & CP_EC;
+
+ if (cpec >= ARRAY_SIZE(cp_err))
+ cpec = 0;
+ return cp_err[cpec];
+}
+
+static void do_unexpected_cp(struct pt_regs *regs, unsigned long error_code)
+{
+ WARN_ONCE(1, "Unexpected %s #CP, error_code: %s\n",
+ user_mode(regs) ? "user mode" : "kernel mode",
+ cp_err_string(error_code));
+}
+
+static DEFINE_RATELIMIT_STATE(cpf_rate, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+static void do_user_cp_fault(struct pt_regs *regs, unsigned long error_code)
+{
+ struct task_struct *tsk;
+ unsigned long ssp;
+
+ /*
+ * An exception was just taken from userspace. Since interrupts are disabled
+ * here, no scheduling should have messed with the registers yet and they
+ * will be whatever is live in userspace. So read the SSP before enabling
+ * interrupts so locking the fpregs to do it later is not required.
+ */
+ rdmsrl(MSR_IA32_PL3_SSP, ssp);
+
+ cond_local_irq_enable(regs);
+
+ tsk = current;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_nr = X86_TRAP_CP;
+
+ /* Ratelimit to prevent log spamming. */
+ if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
+ __ratelimit(&cpf_rate)) {
+ pr_emerg("%s[%d] control protection ip:%lx sp:%lx ssp:%lx error:%lx(%s)%s",
+ tsk->comm, task_pid_nr(tsk),
+ regs->ip, regs->sp, ssp, error_code,
+ cp_err_string(error_code),
+ error_code & CP_ENCL ? " in enclave" : "");
+ print_vma_addr(KERN_CONT " in ", regs->ip);
+ pr_cont("\n");
+ }
+
+ force_sig_fault(SIGSEGV, SEGV_CPERR, (void __user *)0);
+ cond_local_irq_disable(regs);
+}
+
+static __ro_after_init bool ibt_fatal = true;
+
+/* code label defined in asm below */
+extern void ibt_selftest_ip(void);
+
+static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
+{
+ if ((error_code & CP_EC) != CP_ENDBR) {
+ do_unexpected_cp(regs, error_code);
+ return;
+ }
+
+ if (unlikely(regs->ip == (unsigned long)&ibt_selftest_ip)) {
+ regs->ax = 0;
+ return;
+ }
+
+ pr_err("Missing ENDBR: %pS\n", (void *)instruction_pointer(regs));
+ if (!ibt_fatal) {
+ printk(KERN_DEFAULT CUT_HERE);
+ __warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
+ return;
+ }
+ BUG();
+}
+
+/* Must be noinline to ensure uniqueness of ibt_selftest_ip. */
+noinline bool ibt_selftest(void)
+{
+ unsigned long ret;
+
+ asm (" lea ibt_selftest_ip(%%rip), %%rax\n\t"
+ ANNOTATE_RETPOLINE_SAFE
+ " jmp *%%rax\n\t"
+ "ibt_selftest_ip:\n\t"
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+ " nop\n\t"
+
+ : "=a" (ret) : : "memory");
+
+ return !ret;
+}
+
+static int __init ibt_setup(char *str)
+{
+ if (!strcmp(str, "off"))
+ setup_clear_cpu_cap(X86_FEATURE_IBT);
+
+ if (!strcmp(str, "warn"))
+ ibt_fatal = false;
+
+ return 1;
+}
+
+__setup("ibt=", ibt_setup);
+
+DEFINE_IDTENTRY_ERRORCODE(exc_control_protection)
+{
+ if (user_mode(regs)) {
+ if (cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
+ do_user_cp_fault(regs, error_code);
+ else
+ do_unexpected_cp(regs, error_code);
+ } else {
+ if (cpu_feature_enabled(X86_FEATURE_IBT))
+ do_kernel_cp_fault(regs, error_code);
+ else
+ do_unexpected_cp(regs, error_code);
+ }
+}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 80710a68ef7d..3ea06b0b4570 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -601,27 +601,43 @@ __noendbr void ibt_restore(u64 save)
static __always_inline void setup_cet(struct cpuinfo_x86 *c)
{
- u64 msr = CET_ENDBR_EN;
+ bool user_shstk, kernel_ibt;
- if (!HAS_KERNEL_IBT ||
- !cpu_feature_enabled(X86_FEATURE_IBT))
+ if (!IS_ENABLED(CONFIG_X86_CET))
return;
- wrmsrl(MSR_IA32_S_CET, msr);
+ kernel_ibt = HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT);
+ user_shstk = cpu_feature_enabled(X86_FEATURE_SHSTK) &&
+ IS_ENABLED(CONFIG_X86_USER_SHADOW_STACK);
+
+ if (!kernel_ibt && !user_shstk)
+ return;
+
+ if (user_shstk)
+ set_cpu_cap(c, X86_FEATURE_USER_SHSTK);
+
+ if (kernel_ibt)
+ wrmsrl(MSR_IA32_S_CET, CET_ENDBR_EN);
+ else
+ wrmsrl(MSR_IA32_S_CET, 0);
+
cr4_set_bits(X86_CR4_CET);
- if (!ibt_selftest()) {
+ if (kernel_ibt && !ibt_selftest()) {
pr_err("IBT selftest: Failed!\n");
wrmsrl(MSR_IA32_S_CET, 0);
setup_clear_cpu_cap(X86_FEATURE_IBT);
- return;
}
}
__noendbr void cet_disable(void)
{
- if (cpu_feature_enabled(X86_FEATURE_IBT))
- wrmsrl(MSR_IA32_S_CET, 0);
+ if (!(cpu_feature_enabled(X86_FEATURE_IBT) ||
+ cpu_feature_enabled(X86_FEATURE_SHSTK)))
+ return;
+
+ wrmsrl(MSR_IA32_S_CET, 0);
+ wrmsrl(MSR_IA32_U_CET, 0);
}
/*
@@ -1483,6 +1499,9 @@ static void __init cpu_parse_early_param(void)
if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+ if (cmdline_find_option_bool(boot_command_line, "nousershstk"))
+ setup_clear_cpu_cap(X86_FEATURE_USER_SHSTK);
+
arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
if (arglen <= 0)
return;
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
index f6748c8bd647..e462c1d3800a 100644
--- a/arch/x86/kernel/cpu/cpuid-deps.c
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -81,6 +81,7 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_XFD, X86_FEATURE_XSAVES },
{ X86_FEATURE_XFD, X86_FEATURE_XGETBV1 },
{ X86_FEATURE_AMX_TILE, X86_FEATURE_XFD },
+ { X86_FEATURE_SHSTK, X86_FEATURE_XSAVES },
{}
};
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index f5fdeb1e3606..dd33ee872339 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -596,11 +596,6 @@ void reload_ucode_amd(unsigned int cpu)
}
}
}
-static u16 __find_equiv_id(unsigned int cpu)
-{
- struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- return find_equiv_id(&equiv_table, uci->cpu_sig.sig);
-}
/*
* a small, trivial cache of per-family ucode patches
@@ -651,9 +646,11 @@ static void free_cache(void)
static struct ucode_patch *find_patch(unsigned int cpu)
{
+ struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
u16 equiv_id;
- equiv_id = __find_equiv_id(cpu);
+
+ equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
if (!equiv_id)
return NULL;
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 3afcf3de0dd4..ba00a229d054 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -440,9 +440,6 @@ static int microcode_reload_late(void)
int old = boot_cpu_data.microcode, ret;
struct cpuinfo_x86 prev_info;
- pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
- pr_err("You should switch to early loading, if possible.\n");
-
atomic_set(&late_cpus_in, 0);
atomic_set(&late_cpus_out, 0);
@@ -498,7 +495,11 @@ put:
if (ret == 0)
ret = size;
- add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+ pr_err("Late microcode loading is dangerous and taints the kernel.\n");
+ pr_err("You should switch to early loading if possible.\n");
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ }
return ret;
}
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 099b6f0d96bd..31c0e68f6227 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -4,6 +4,8 @@
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/cpufreq.h>
+#include <asm/prctl.h>
+#include <linux/proc_fs.h>
#include "cpu.h"
@@ -175,3 +177,24 @@ const struct seq_operations cpuinfo_op = {
.stop = c_stop,
.show = show_cpuinfo,
};
+
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+static void dump_x86_features(struct seq_file *m, unsigned long features)
+{
+ if (features & ARCH_SHSTK_SHSTK)
+ seq_puts(m, "shstk ");
+ if (features & ARCH_SHSTK_WRSS)
+ seq_puts(m, "wrss ");
+}
+
+void arch_proc_pid_thread_features(struct seq_file *m, struct task_struct *task)
+{
+ seq_puts(m, "x86_Thread_features:\t");
+ dump_x86_features(m, task->thread.features);
+ seq_putc(m, '\n');
+
+ seq_puts(m, "x86_Thread_features_locked:\t");
+ dump_x86_features(m, task->thread.features_locked);
+ seq_putc(m, '\n');
+}
+#endif /* CONFIG_X86_USER_SHADOW_STACK */
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index caf33486dc5e..aa4856b236b8 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -552,8 +552,36 @@ static inline void fpu_inherit_perms(struct fpu *dst_fpu)
}
}
+/* A passed ssp of zero will not cause any update */
+static int update_fpu_shstk(struct task_struct *dst, unsigned long ssp)
+{
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+ struct cet_user_state *xstate;
+
+ /* A zero ssp means no update is needed. */
+ if (!ssp)
+ return 0;
+
+ xstate = get_xsave_addr(&dst->thread.fpu.fpstate->regs.xsave,
+ XFEATURE_CET_USER);
+
+ /*
+ * If there is a non-zero ssp, then 'dst' must be configured with a shadow
+ * stack and the fpu state should be up to date since it was just copied
+ * from the parent in fpu_clone(). So there must be a valid non-init CET
+ * state location in the buffer.
+ */
+ if (WARN_ON_ONCE(!xstate))
+ return 1;
+
+ xstate->user_ssp = (u64)ssp;
+#endif
+ return 0;
+}
+
/* Clone current's FPU state on fork */
-int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal)
+int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal,
+ unsigned long ssp)
{
struct fpu *src_fpu = &current->thread.fpu;
struct fpu *dst_fpu = &dst->thread.fpu;
@@ -613,6 +641,12 @@ int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal)
if (use_xsave())
dst_fpu->fpstate->regs.xsave.header.xfeatures &= ~XFEATURE_MASK_PASID;
+ /*
+ * Update shadow stack pointer, in case it changed during clone.
+ */
+ if (update_fpu_shstk(dst, ssp))
+ return 1;
+
trace_x86_fpu_copy_src(src_fpu);
trace_x86_fpu_copy_dst(dst_fpu);
@@ -753,6 +787,24 @@ void switch_fpu_return(void)
}
EXPORT_SYMBOL_GPL(switch_fpu_return);
+void fpregs_lock_and_load(void)
+{
+ /*
+ * fpregs_lock() only disables preemption (mostly). So modifying state
+ * in an interrupt could screw up an in-progress fpregs operation.
+ * Warn about it.
+ */
+ WARN_ON_ONCE(!irq_fpu_usable());
+ WARN_ON_ONCE(current->flags & PF_KTHREAD);
+
+ fpregs_lock();
+
+ fpregs_assert_state_consistent();
+
+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
+ fpregs_restore_userregs();
+}
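The intended caller pattern, as the shstk code later in the series uses it (sketch; new_ssp stands in for whatever value is being installed):

	fpregs_lock_and_load();
	wrmsrl(MSR_IA32_PL3_SSP, new_ssp);	/* fpregs are live, so the write sticks */
	fpregs_unlock();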
+
#ifdef CONFIG_X86_DEBUG_FPU
/*
* If current FPU state according to its tracking (loaded FPU context on this
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index 6d056b68f4ed..6bc1eb2a21bd 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -8,6 +8,7 @@
#include <asm/fpu/api.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
+#include <asm/prctl.h>
#include "context.h"
#include "internal.h"
@@ -174,6 +175,86 @@ out:
return ret;
}
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+int ssp_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (target->thread.features & ARCH_SHSTK_SHSTK)
+ return regset->n;
+
+ return 0;
+}
+
+int ssp_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ struct fpu *fpu = &target->thread.fpu;
+ struct cet_user_state *cetregs;
+
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
+ return -ENODEV;
+
+ sync_fpstate(fpu);
+ cetregs = get_xsave_addr(&fpu->fpstate->regs.xsave, XFEATURE_CET_USER);
+ if (WARN_ON(!cetregs)) {
+ /*
+ * This shouldn't ever be NULL because shadow stack was
+ * verified to be enabled above. This means
+ * MSR_IA32_U_CET.CET_SHSTK_EN should be 1 and so
+ * XFEATURE_CET_USER should not be in the init state.
+ */
+ return -ENODEV;
+ }
+
+ return membuf_write(&to, (unsigned long *)&cetregs->user_ssp,
+ sizeof(cetregs->user_ssp));
+}
+
+int ssp_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct fpu *fpu = &target->thread.fpu;
+ struct xregs_state *xsave = &fpu->fpstate->regs.xsave;
+ struct cet_user_state *cetregs;
+ unsigned long user_ssp;
+ int r;
+
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) ||
+ !ssp_active(target, regset))
+ return -ENODEV;
+
+ if (pos != 0 || count != sizeof(user_ssp))
+ return -EINVAL;
+
+ r = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &user_ssp, 0, -1);
+ if (r)
+ return r;
+
+ /*
+ * Some kernel instructions (IRET, etc.) can cause exceptions in the case
+ * of disallowed CET register values. Just prevent invalid values.
+ */
+ if (user_ssp >= TASK_SIZE_MAX || !IS_ALIGNED(user_ssp, 8))
+ return -EINVAL;
+
+ fpu_force_restore(fpu);
+
+ cetregs = get_xsave_addr(xsave, XFEATURE_CET_USER);
+ if (WARN_ON(!cetregs)) {
+ /*
+ * This shouldn't ever be NULL because shadow stack was
+ * verified to be enabled above. This means
+ * MSR_IA32_U_CET.CET_SHSTK_EN should be 1 and so
+ * XFEATURE_CET_USER should not be in the init state.
+ */
+ return -ENODEV;
+ }
+
+ cetregs->user_ssp = user_ssp;
+ return 0;
+}
+#endif /* CONFIG_X86_USER_SHADOW_STACK */
+
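A hedged tracer-side sketch of reading the new regset; NT_X86_SHSTK is the note type wired up for this regset, while pid and the error handling are assumptions:

	u64 ssp;
	struct iovec iov = { .iov_base = &ssp, .iov_len = sizeof(ssp) };

	/* The regset is a single u64: the tracee's shadow stack pointer. */
	if (ptrace(PTRACE_GETREGSET, pid, NT_X86_SHSTK, &iov))
		perror("PTRACE_GETREGSET");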
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
/*
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 0bab497c9436..4fa4751912d9 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -39,26 +39,26 @@
*/
static const char *xfeature_names[] =
{
- "x87 floating point registers" ,
- "SSE registers" ,
- "AVX registers" ,
- "MPX bounds registers" ,
- "MPX CSR" ,
- "AVX-512 opmask" ,
- "AVX-512 Hi256" ,
- "AVX-512 ZMM_Hi256" ,
- "Processor Trace (unused)" ,
+ "x87 floating point registers",
+ "SSE registers",
+ "AVX registers",
+ "MPX bounds registers",
+ "MPX CSR",
+ "AVX-512 opmask",
+ "AVX-512 Hi256",
+ "AVX-512 ZMM_Hi256",
+ "Processor Trace (unused)",
"Protection Keys User registers",
"PASID state",
- "unknown xstate feature" ,
- "unknown xstate feature" ,
- "unknown xstate feature" ,
- "unknown xstate feature" ,
- "unknown xstate feature" ,
- "unknown xstate feature" ,
- "AMX Tile config" ,
- "AMX Tile data" ,
- "unknown xstate feature" ,
+ "Control-flow User registers",
+ "Control-flow Kernel registers (unused)",
+ "unknown xstate feature",
+ "unknown xstate feature",
+ "unknown xstate feature",
+ "unknown xstate feature",
+ "AMX Tile config",
+ "AMX Tile data",
+ "unknown xstate feature",
};
static unsigned short xsave_cpuid_features[] __initdata = {
@@ -73,6 +73,7 @@ static unsigned short xsave_cpuid_features[] __initdata = {
[XFEATURE_PT_UNIMPLEMENTED_SO_FAR] = X86_FEATURE_INTEL_PT,
[XFEATURE_PKRU] = X86_FEATURE_PKU,
[XFEATURE_PASID] = X86_FEATURE_ENQCMD,
+ [XFEATURE_CET_USER] = X86_FEATURE_SHSTK,
[XFEATURE_XTILE_CFG] = X86_FEATURE_AMX_TILE,
[XFEATURE_XTILE_DATA] = X86_FEATURE_AMX_TILE,
};
@@ -276,6 +277,7 @@ static void __init print_xstate_features(void)
print_xstate_feature(XFEATURE_MASK_Hi16_ZMM);
print_xstate_feature(XFEATURE_MASK_PKRU);
print_xstate_feature(XFEATURE_MASK_PASID);
+ print_xstate_feature(XFEATURE_MASK_CET_USER);
print_xstate_feature(XFEATURE_MASK_XTILE_CFG);
print_xstate_feature(XFEATURE_MASK_XTILE_DATA);
}
@@ -344,6 +346,7 @@ static __init void os_xrstor_booting(struct xregs_state *xstate)
XFEATURE_MASK_BNDREGS | \
XFEATURE_MASK_BNDCSR | \
XFEATURE_MASK_PASID | \
+ XFEATURE_MASK_CET_USER | \
XFEATURE_MASK_XTILE)
/*
@@ -446,14 +449,15 @@ static void __init __xstate_dump_leaves(void)
} \
} while (0)
-#define XCHECK_SZ(sz, nr, nr_macro, __struct) do { \
- if ((nr == nr_macro) && \
- WARN_ONCE(sz != sizeof(__struct), \
- "%s: struct is %zu bytes, cpu state %d bytes\n", \
- __stringify(nr_macro), sizeof(__struct), sz)) { \
+#define XCHECK_SZ(sz, nr, __struct) ({ \
+ if (WARN_ONCE(sz != sizeof(__struct), \
+ "[%s]: struct is %zu bytes, cpu state %d bytes\n", \
+ xfeature_names[nr], sizeof(__struct), sz)) { \
__xstate_dump_leaves(); \
} \
-} while (0)
+ true; \
+})
+
/**
* check_xtile_data_against_struct - Check tile data state size.
@@ -527,36 +531,28 @@ static bool __init check_xstate_against_struct(int nr)
* Ask the CPU for the size of the state.
*/
int sz = xfeature_size(nr);
+
/*
* Match each CPU state with the corresponding software
* structure.
*/
- XCHECK_SZ(sz, nr, XFEATURE_YMM, struct ymmh_struct);
- XCHECK_SZ(sz, nr, XFEATURE_BNDREGS, struct mpx_bndreg_state);
- XCHECK_SZ(sz, nr, XFEATURE_BNDCSR, struct mpx_bndcsr_state);
- XCHECK_SZ(sz, nr, XFEATURE_OPMASK, struct avx_512_opmask_state);
- XCHECK_SZ(sz, nr, XFEATURE_ZMM_Hi256, struct avx_512_zmm_uppers_state);
- XCHECK_SZ(sz, nr, XFEATURE_Hi16_ZMM, struct avx_512_hi16_state);
- XCHECK_SZ(sz, nr, XFEATURE_PKRU, struct pkru_state);
- XCHECK_SZ(sz, nr, XFEATURE_PASID, struct ia32_pasid_state);
- XCHECK_SZ(sz, nr, XFEATURE_XTILE_CFG, struct xtile_cfg);
-
- /* The tile data size varies between implementations. */
- if (nr == XFEATURE_XTILE_DATA)
- check_xtile_data_against_struct(sz);
-
- /*
- * Make *SURE* to add any feature numbers in below if
- * there are "holes" in the xsave state component
- * numbers.
- */
- if ((nr < XFEATURE_YMM) ||
- (nr >= XFEATURE_MAX) ||
- (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR) ||
- ((nr >= XFEATURE_RSRVD_COMP_11) && (nr <= XFEATURE_RSRVD_COMP_16))) {
+ switch (nr) {
+ case XFEATURE_YMM: return XCHECK_SZ(sz, nr, struct ymmh_struct);
+ case XFEATURE_BNDREGS: return XCHECK_SZ(sz, nr, struct mpx_bndreg_state);
+ case XFEATURE_BNDCSR: return XCHECK_SZ(sz, nr, struct mpx_bndcsr_state);
+ case XFEATURE_OPMASK: return XCHECK_SZ(sz, nr, struct avx_512_opmask_state);
+ case XFEATURE_ZMM_Hi256: return XCHECK_SZ(sz, nr, struct avx_512_zmm_uppers_state);
+ case XFEATURE_Hi16_ZMM: return XCHECK_SZ(sz, nr, struct avx_512_hi16_state);
+ case XFEATURE_PKRU: return XCHECK_SZ(sz, nr, struct pkru_state);
+ case XFEATURE_PASID: return XCHECK_SZ(sz, nr, struct ia32_pasid_state);
+ case XFEATURE_XTILE_CFG: return XCHECK_SZ(sz, nr, struct xtile_cfg);
+ case XFEATURE_CET_USER: return XCHECK_SZ(sz, nr, struct cet_user_state);
+ case XFEATURE_XTILE_DATA: check_xtile_data_against_struct(sz); return true;
+ default:
XSTATE_WARN_ON(1, "No structure for xstate: %d\n", nr);
return false;
}
+
return true;
}
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index a58c6bc1cd68..5074b8420359 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -107,7 +107,7 @@ static const __initconst struct idt_data def_idts[] = {
ISTG(X86_TRAP_MC, asm_exc_machine_check, IST_INDEX_MCE),
#endif
-#ifdef CONFIG_X86_KERNEL_IBT
+#ifdef CONFIG_X86_CET
INTG(X86_TRAP_CP, asm_exc_control_protection),
#endif
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index dac41a0072ea..3ab62ac98c2c 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -50,6 +50,7 @@
#include <asm/unwind.h>
#include <asm/tdx.h>
#include <asm/mmu_context.h>
+#include <asm/shstk.h>
#include "process.h"
@@ -121,6 +122,7 @@ void exit_thread(struct task_struct *tsk)
free_vm86(t);
+ shstk_free(tsk);
fpu__drop(fpu);
}
@@ -142,6 +144,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
struct inactive_task_frame *frame;
struct fork_frame *fork_frame;
struct pt_regs *childregs;
+ unsigned long new_ssp;
int ret = 0;
childregs = task_pt_regs(p);
@@ -179,7 +182,16 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
frame->flags = X86_EFLAGS_FIXED;
#endif
- fpu_clone(p, clone_flags, args->fn);
+ /*
+ * Allocate a new shadow stack for the thread if needed. If shadow stack
+ * is disabled, new_ssp will remain 0, and fpu_clone() will know not to
+ * update it.
+ */
+ new_ssp = shstk_alloc_thread_stack(p, clone_flags, args->stack_size);
+ if (IS_ERR_VALUE(new_ssp))
+ return PTR_ERR((void *)new_ssp);
+
+ fpu_clone(p, clone_flags, args->fn, new_ssp);
/* Kernel thread ? */
if (unlikely(p->flags & PF_KTHREAD)) {
@@ -225,6 +237,13 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
io_bitmap_share(p);
+ /*
+ * If copy_thread() is failing, don't leak the shadow stack possibly
+ * allocated in shstk_alloc_thread_stack() above.
+ */
+ if (ret)
+ shstk_free(p);
+
return ret;
}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3d181c16a2f6..33b268747bb7 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -515,6 +515,8 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
load_gs_index(__USER_DS);
}
+ reset_thread_features();
+
loadsegment(fs, 0);
loadsegment(es, _ds);
loadsegment(ds, _ds);
@@ -894,6 +896,12 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
else
return put_user(LAM_U57_BITS, (unsigned long __user *)arg2);
#endif
+ case ARCH_SHSTK_ENABLE:
+ case ARCH_SHSTK_DISABLE:
+ case ARCH_SHSTK_LOCK:
+ case ARCH_SHSTK_UNLOCK:
+ case ARCH_SHSTK_STATUS:
+ return shstk_prctl(task, option, arg2);
default:
ret = -EINVAL;
break;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index dfaa270a7cc9..095f04bdabdc 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -58,6 +58,7 @@ enum x86_regset_64 {
REGSET64_FP,
REGSET64_IOPERM,
REGSET64_XSTATE,
+ REGSET64_SSP,
};
#define REGSET_GENERAL \
@@ -1267,6 +1268,17 @@ static struct user_regset x86_64_regsets[] __ro_after_init = {
.active = ioperm_active,
.regset_get = ioperm_get
},
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+ [REGSET64_SSP] = {
+ .core_note_type = NT_X86_SHSTK,
+ .n = 1,
+ .size = sizeof(u64),
+ .align = sizeof(u64),
+ .active = ssp_active,
+ .regset_get = ssp_get,
+ .set = ssp_set
+ },
+#endif
};
static const struct user_regset_view user_x86_64_view = {
diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
new file mode 100644
index 000000000000..1f767c509ee9
--- /dev/null
+++ b/arch/x86/kernel/shstk.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * shstk.c - Intel shadow stack support
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ * Yu-cheng Yu <yu-cheng.yu@intel.com>
+ */
+
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/sched/signal.h>
+#include <linux/compat.h>
+#include <linux/sizes.h>
+#include <linux/user.h>
+#include <linux/syscalls.h>
+#include <asm/msr.h>
+#include <asm/fpu/xstate.h>
+#include <asm/fpu/types.h>
+#include <asm/shstk.h>
+#include <asm/special_insns.h>
+#include <asm/fpu/api.h>
+#include <asm/prctl.h>
+
+#define SS_FRAME_SIZE 8
+
+static bool features_enabled(unsigned long features)
+{
+ return current->thread.features & features;
+}
+
+static void features_set(unsigned long features)
+{
+ current->thread.features |= features;
+}
+
+static void features_clr(unsigned long features)
+{
+ current->thread.features &= ~features;
+}
+
+/*
+ * Create a restore token on the shadow stack. A token is always 8-byte
+ * and aligned to 8.
+ */
+static int create_rstor_token(unsigned long ssp, unsigned long *token_addr)
+{
+ unsigned long addr;
+
+ /* Token must be aligned */
+ if (!IS_ALIGNED(ssp, 8))
+ return -EINVAL;
+
+ addr = ssp - SS_FRAME_SIZE;
+
+ /*
+ * SSP is aligned, so reserved bits and the mode bit are zero; just mark
+ * the token 64-bit.
+ */
+ ssp |= BIT(0);
+
+ if (write_user_shstk_64((u64 __user *)addr, (u64)ssp))
+ return -EFAULT;
+
+ if (token_addr)
+ *token_addr = addr;
+
+ return 0;
+}
+
+static unsigned long alloc_shstk(unsigned long addr, unsigned long size,
+ unsigned long token_offset, bool set_res_tok)
+{
+ int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_ABOVE4G;
+ struct mm_struct *mm = current->mm;
+ unsigned long mapped_addr, unused;
+
+ if (addr)
+ flags |= MAP_FIXED_NOREPLACE;
+
+ mmap_write_lock(mm);
+ mapped_addr = do_mmap(NULL, addr, size, PROT_READ, flags,
+ VM_SHADOW_STACK | VM_WRITE, 0, &unused, NULL);
+ mmap_write_unlock(mm);
+
+ if (!set_res_tok || IS_ERR_VALUE(mapped_addr))
+ goto out;
+
+ if (create_rstor_token(mapped_addr + token_offset, NULL)) {
+ vm_munmap(mapped_addr, size);
+ return -EINVAL;
+ }
+
+out:
+ return mapped_addr;
+}
+
+static unsigned long adjust_shstk_size(unsigned long size)
+{
+ if (size)
+ return PAGE_ALIGN(size);
+
+ return PAGE_ALIGN(min_t(unsigned long long, rlimit(RLIMIT_STACK), SZ_4G));
+}
+
+static void unmap_shadow_stack(u64 base, u64 size)
+{
+ while (1) {
+ int r;
+
+ r = vm_munmap(base, size);
+
+ /*
+ * vm_munmap() returns -EINTR when mmap_lock is held by
+ * something else, and that lock should not be held for a
+ * long time. Retry in that case.
+ */
+ if (r == -EINTR) {
+ cond_resched();
+ continue;
+ }
+
+ /*
+ * For all other types of vm_munmap() failure, either the
+ * system is out of memory or there is a bug.
+ */
+ WARN_ON_ONCE(r);
+ break;
+ }
+}
+
+static int shstk_setup(void)
+{
+ struct thread_shstk *shstk = &current->thread.shstk;
+ unsigned long addr, size;
+
+ /* Already enabled */
+ if (features_enabled(ARCH_SHSTK_SHSTK))
+ return 0;
+
+ /* Also not supported for 32-bit and x32 */
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) || in_32bit_syscall())
+ return -EOPNOTSUPP;
+
+ size = adjust_shstk_size(0);
+ addr = alloc_shstk(0, size, 0, false);
+ if (IS_ERR_VALUE(addr))
+ return PTR_ERR((void *)addr);
+
+ fpregs_lock_and_load();
+ wrmsrl(MSR_IA32_PL3_SSP, addr + size);
+ wrmsrl(MSR_IA32_U_CET, CET_SHSTK_EN);
+ fpregs_unlock();
+
+ shstk->base = addr;
+ shstk->size = size;
+ features_set(ARCH_SHSTK_SHSTK);
+
+ return 0;
+}
+
+void reset_thread_features(void)
+{
+ memset(&current->thread.shstk, 0, sizeof(struct thread_shstk));
+ current->thread.features = 0;
+ current->thread.features_locked = 0;
+}
+
+unsigned long shstk_alloc_thread_stack(struct task_struct *tsk, unsigned long clone_flags,
+ unsigned long stack_size)
+{
+ struct thread_shstk *shstk = &tsk->thread.shstk;
+ unsigned long addr, size;
+
+ /*
+ * If shadow stack is not enabled on the new thread, skip any
+ * switch to a new shadow stack.
+ */
+ if (!features_enabled(ARCH_SHSTK_SHSTK))
+ return 0;
+
+ /*
+ * For CLONE_VM, except vfork, the child needs a separate shadow
+ * stack.
+ */
+ if ((clone_flags & (CLONE_VFORK | CLONE_VM)) != CLONE_VM)
+ return 0;
+
+ size = adjust_shstk_size(stack_size);
+ addr = alloc_shstk(0, size, 0, false);
+ if (IS_ERR_VALUE(addr))
+ return addr;
+
+ shstk->base = addr;
+ shstk->size = size;
+
+ return addr + size;
+}
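How the flag check above plays out for the common clone() variants (illustration derived from the two early returns):

	/*
	 * fork():            CLONE_VM clear            -> 0, stack COWed with the mm
	 * vfork():           CLONE_VM|CLONE_VFORK      -> 0, borrows the parent's
	 * pthread_create():  CLONE_VM, no CLONE_VFORK  -> new shadow stack
	 */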
+
+static unsigned long get_user_shstk_addr(void)
+{
+ unsigned long long ssp;
+
+ fpregs_lock_and_load();
+
+ rdmsrl(MSR_IA32_PL3_SSP, ssp);
+
+ fpregs_unlock();
+
+ return ssp;
+}
+
+#define SHSTK_DATA_BIT BIT(63)
+
+static int put_shstk_data(u64 __user *addr, u64 data)
+{
+ if (WARN_ON_ONCE(data & SHSTK_DATA_BIT))
+ return -EINVAL;
+
+ /*
+ * Mark the high bit so that the sigframe can't be processed as a
+ * return address.
+ */
+ if (write_user_shstk_64(addr, data | SHSTK_DATA_BIT))
+ return -EFAULT;
+ return 0;
+}
+
+static int get_shstk_data(unsigned long *data, unsigned long __user *addr)
+{
+ unsigned long ldata;
+
+ if (unlikely(get_user(ldata, addr)))
+ return -EFAULT;
+
+ if (!(ldata & SHSTK_DATA_BIT))
+ return -EINVAL;
+
+ *data = ldata & ~SHSTK_DATA_BIT;
+
+ return 0;
+}
+
+static int shstk_push_sigframe(unsigned long *ssp)
+{
+ unsigned long target_ssp = *ssp;
+
+ /* Token must be aligned */
+ if (!IS_ALIGNED(target_ssp, 8))
+ return -EINVAL;
+
+ *ssp -= SS_FRAME_SIZE;
+ if (put_shstk_data((u64 __user *)*ssp, target_ssp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int shstk_pop_sigframe(unsigned long *ssp)
+{
+ unsigned long token_addr;
+ int err;
+
+ err = get_shstk_data(&token_addr, (unsigned long __user *)*ssp);
+ if (unlikely(err))
+ return err;
+
+ /* Restore SSP aligned? */
+ if (unlikely(!IS_ALIGNED(token_addr, 8)))
+ return -EINVAL;
+
+ /* SSP in userspace? */
+ if (unlikely(token_addr >= TASK_SIZE_MAX))
+ return -EINVAL;
+
+ *ssp = token_addr;
+
+ return 0;
+}
+
+int setup_signal_shadow_stack(struct ksignal *ksig)
+{
+ void __user *restorer = ksig->ka.sa.sa_restorer;
+ unsigned long ssp;
+ int err;
+
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) ||
+ !features_enabled(ARCH_SHSTK_SHSTK))
+ return 0;
+
+ if (!restorer)
+ return -EINVAL;
+
+ ssp = get_user_shstk_addr();
+ if (unlikely(!ssp))
+ return -EINVAL;
+
+ err = shstk_push_sigframe(&ssp);
+ if (unlikely(err))
+ return err;
+
+ /* Push restorer address */
+ ssp -= SS_FRAME_SIZE;
+ err = write_user_shstk_64((u64 __user *)ssp, (u64)restorer);
+ if (unlikely(err))
+ return -EFAULT;
+
+ fpregs_lock_and_load();
+ wrmsrl(MSR_IA32_PL3_SSP, ssp);
+ fpregs_unlock();
+
+ return 0;
+}
+
+int restore_signal_shadow_stack(void)
+{
+ unsigned long ssp;
+ int err;
+
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) ||
+ !features_enabled(ARCH_SHSTK_SHSTK))
+ return 0;
+
+ ssp = get_user_shstk_addr();
+ if (unlikely(!ssp))
+ return -EINVAL;
+
+ err = shstk_pop_sigframe(&ssp);
+ if (unlikely(err))
+ return err;
+
+ fpregs_lock_and_load();
+ wrmsrl(MSR_IA32_PL3_SSP, ssp);
+ fpregs_unlock();
+
+ return 0;
+}
+
+void shstk_free(struct task_struct *tsk)
+{
+ struct thread_shstk *shstk = &tsk->thread.shstk;
+
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) ||
+ !features_enabled(ARCH_SHSTK_SHSTK))
+ return;
+
+ /*
+ * When fork() with CLONE_VM fails, the child (tsk) already has a
+ * shadow stack allocated, and exit_thread() calls this function to
+ * free it. In this case the parent (current) and the child share
+ * the same mm struct.
+ */
+ if (!tsk->mm || tsk->mm != current->mm)
+ return;
+
+ unmap_shadow_stack(shstk->base, shstk->size);
+}
+
+static int wrss_control(bool enable)
+{
+ u64 msrval;
+
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
+ return -EOPNOTSUPP;
+
+ /*
+ * Only enable WRSS if shadow stack is enabled. If shadow stack is not
+ * enabled, WRSS will already be disabled, so don't bother clearing it
+ * when disabling.
+ */
+ if (!features_enabled(ARCH_SHSTK_SHSTK))
+ return -EPERM;
+
+ /* Already enabled/disabled? */
+ if (features_enabled(ARCH_SHSTK_WRSS) == enable)
+ return 0;
+
+ fpregs_lock_and_load();
+ rdmsrl(MSR_IA32_U_CET, msrval);
+
+ if (enable) {
+ features_set(ARCH_SHSTK_WRSS);
+ msrval |= CET_WRSS_EN;
+ } else {
+ features_clr(ARCH_SHSTK_WRSS);
+ if (!(msrval & CET_WRSS_EN))
+ goto unlock;
+
+ msrval &= ~CET_WRSS_EN;
+ }
+
+ wrmsrl(MSR_IA32_U_CET, msrval);
+
+unlock:
+ fpregs_unlock();
+
+ return 0;
+}
+
+static int shstk_disable(void)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
+ return -EOPNOTSUPP;
+
+ /* Already disabled? */
+ if (!features_enabled(ARCH_SHSTK_SHSTK))
+ return 0;
+
+ fpregs_lock_and_load();
+ /* Disable WRSS too when disabling shadow stack */
+ wrmsrl(MSR_IA32_U_CET, 0);
+ wrmsrl(MSR_IA32_PL3_SSP, 0);
+ fpregs_unlock();
+
+ shstk_free(current);
+ features_clr(ARCH_SHSTK_SHSTK | ARCH_SHSTK_WRSS);
+
+ return 0;
+}
+
+SYSCALL_DEFINE3(map_shadow_stack, unsigned long, addr, unsigned long, size, unsigned int, flags)
+{
+ bool set_tok = flags & SHADOW_STACK_SET_TOKEN;
+ unsigned long aligned_size;
+
+ if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
+ return -EOPNOTSUPP;
+
+ if (flags & ~SHADOW_STACK_SET_TOKEN)
+ return -EINVAL;
+
+ /* If there isn't space for a token */
+ if (set_tok && size < 8)
+ return -ENOSPC;
+
+ if (addr && addr < SZ_4G)
+ return -ERANGE;
+
+ /*
+ * An overflow would result in attempting to write the restore token
+ * to the wrong location. Not catastrophic, but just return the right
+ * error code and block it.
+ */
+ aligned_size = PAGE_ALIGN(size);
+ if (aligned_size < size)
+ return -EOVERFLOW;
+
+ return alloc_shstk(addr, aligned_size, size, set_tok);
+}
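A userspace sketch of the new syscall; __NR_map_shadow_stack comes from the syscall-table update elsewhere in the series, so the wrapper-less invocation here is an assumption:

	/* Map a 1 MiB shadow stack with a restore token at the top. */
	unsigned long size = 0x100000;
	unsigned long ssp = syscall(__NR_map_shadow_stack, 0, size,
				    SHADOW_STACK_SET_TOKEN);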
+
+long shstk_prctl(struct task_struct *task, int option, unsigned long arg2)
+{
+ unsigned long features = arg2;
+
+ if (option == ARCH_SHSTK_STATUS) {
+ return put_user(task->thread.features, (unsigned long __user *)arg2);
+ }
+
+ if (option == ARCH_SHSTK_LOCK) {
+ task->thread.features_locked |= features;
+ return 0;
+ }
+
+ /* Only allow via ptrace */
+ if (task != current) {
+ if (option == ARCH_SHSTK_UNLOCK && IS_ENABLED(CONFIG_CHECKPOINT_RESTORE)) {
+ task->thread.features_locked &= ~features;
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ /* Do not allow to change locked features */
+ if (features & task->thread.features_locked)
+ return -EPERM;
+
+ /* Only support enabling/disabling one feature at a time. */
+ if (hweight_long(features) > 1)
+ return -EINVAL;
+
+ if (option == ARCH_SHSTK_DISABLE) {
+ if (features & ARCH_SHSTK_WRSS)
+ return wrss_control(false);
+ if (features & ARCH_SHSTK_SHSTK)
+ return shstk_disable();
+ return -EINVAL;
+ }
+
+ /* Handle ARCH_SHSTK_ENABLE */
+ if (features & ARCH_SHSTK_SHSTK)
+ return shstk_setup();
+ if (features & ARCH_SHSTK_WRSS)
+ return wrss_control(true);
+ return -EINVAL;
+}
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 004cb30b7419..356253e85ce9 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -40,6 +40,7 @@
#include <asm/syscall.h>
#include <asm/sigframe.h>
#include <asm/signal.h>
+#include <asm/shstk.h>
static inline int is_ia32_compat_frame(struct ksignal *ksig)
{
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index 9027fc088f97..c12624bc82a3 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -402,7 +402,7 @@ Efault:
*/
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
-static_assert(NSIGSEGV == 9);
+static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index 13a1e6083837..cacf2ede6217 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -175,6 +175,9 @@ int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe), &fp);
uc_flags = frame_uc_flags(regs);
+ if (setup_signal_shadow_stack(ksig))
+ return -EFAULT;
+
if (!user_access_begin(frame, sizeof(*frame)))
return -EFAULT;
@@ -260,6 +263,9 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
goto badframe;
+ if (restore_signal_shadow_stack())
+ goto badframe;
+
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
@@ -403,7 +409,7 @@ void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact)
*/
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
-static_assert(NSIGSEGV == 9);
+static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 8cc653ffdccd..c783aeb37dce 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -193,7 +193,11 @@ get_unmapped_area:
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
- info.low_limit = PAGE_SIZE;
+ if (!in_32bit_syscall() && (flags & MAP_ABOVE4G))
+ info.low_limit = SZ_4G;
+ else
+ info.low_limit = PAGE_SIZE;
+
info.high_limit = get_mmap_base(0);
/*
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 58b1f208eff5..f358350624b2 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -77,18 +77,6 @@
DECLARE_BITMAP(system_vectors, NR_VECTORS);
-static inline void cond_local_irq_enable(struct pt_regs *regs)
-{
- if (regs->flags & X86_EFLAGS_IF)
- local_irq_enable();
-}
-
-static inline void cond_local_irq_disable(struct pt_regs *regs)
-{
- if (regs->flags & X86_EFLAGS_IF)
- local_irq_disable();
-}
-
__always_inline int is_valid_bugaddr(unsigned long addr)
{
if (addr < TASK_SIZE_MAX)
@@ -213,81 +201,6 @@ DEFINE_IDTENTRY(exc_overflow)
do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
}
-#ifdef CONFIG_X86_KERNEL_IBT
-
-static __ro_after_init bool ibt_fatal = true;
-
-extern void ibt_selftest_ip(void); /* code label defined in asm below */
-
-enum cp_error_code {
- CP_EC = (1 << 15) - 1,
-
- CP_RET = 1,
- CP_IRET = 2,
- CP_ENDBR = 3,
- CP_RSTRORSSP = 4,
- CP_SETSSBSY = 5,
-
- CP_ENCL = 1 << 15,
-};
-
-DEFINE_IDTENTRY_ERRORCODE(exc_control_protection)
-{
- if (!cpu_feature_enabled(X86_FEATURE_IBT)) {
- pr_err("Unexpected #CP\n");
- BUG();
- }
-
- if (WARN_ON_ONCE(user_mode(regs) || (error_code & CP_EC) != CP_ENDBR))
- return;
-
- if (unlikely(regs->ip == (unsigned long)&ibt_selftest_ip)) {
- regs->ax = 0;
- return;
- }
-
- pr_err("Missing ENDBR: %pS\n", (void *)instruction_pointer(regs));
- if (!ibt_fatal) {
- printk(KERN_DEFAULT CUT_HERE);
- __warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
- return;
- }
- BUG();
-}
-
-/* Must be noinline to ensure uniqueness of ibt_selftest_ip. */
-noinline bool ibt_selftest(void)
-{
- unsigned long ret;
-
- asm (" lea ibt_selftest_ip(%%rip), %%rax\n\t"
- ANNOTATE_RETPOLINE_SAFE
- " jmp *%%rax\n\t"
- "ibt_selftest_ip:\n\t"
- UNWIND_HINT_FUNC
- ANNOTATE_NOENDBR
- " nop\n\t"
-
- : "=a" (ret) : : "memory");
-
- return !ret;
-}
-
-static int __init ibt_setup(char *str)
-{
- if (!strcmp(str, "off"))
- setup_clear_cpu_cap(X86_FEATURE_IBT);
-
- if (!strcmp(str, "warn"))
- ibt_fatal = false;
-
- return 1;
-}
-
-__setup("ibt=", ibt_setup);
-
-#endif /* CONFIG_X86_KERNEL_IBT */
-
#ifdef CONFIG_X86_F00F_BUG
void handle_invalid_op(struct pt_regs *regs)
#else
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index e4399983c50c..fe68119ce2cc 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1118,8 +1118,22 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
(error_code & X86_PF_INSTR), foreign))
return 1;
+ /*
+ * Shadow stack accesses (PF_SHSTK=1) are only permitted to
+ * shadow stack VMAs. All other accesses result in an error.
+ */
+ if (error_code & X86_PF_SHSTK) {
+ if (unlikely(!(vma->vm_flags & VM_SHADOW_STACK)))
+ return 1;
+ if (unlikely(!(vma->vm_flags & VM_WRITE)))
+ return 1;
+ return 0;
+ }
+
if (error_code & X86_PF_WRITE) {
/* write, present and write, not present: */
+ if (unlikely(vma->vm_flags & VM_SHADOW_STACK))
+ return 1;
if (unlikely(!(vma->vm_flags & VM_WRITE)))
return 1;
return 0;
@@ -1311,6 +1325,14 @@ void do_user_addr_fault(struct pt_regs *regs,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+ /*
+ * Read-only permissions cannot be expressed in shadow stack PTEs.
+ * Treat all shadow stack accesses as WRITE faults. This ensures
+ * that the MM will prepare everything (e.g., break COW) such that
+ * maybe_mkwrite() can create a proper shadow stack PTE.
+ */
+ if (error_code & X86_PF_SHSTK)
+ flags |= FAULT_FLAG_WRITE;
if (error_code & X86_PF_WRITE)
flags |= FAULT_FLAG_WRITE;
if (error_code & X86_PF_INSTR)
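A minimal userspace sketch of the access_error() classification added above; the PF_* and VMA_* constants are hypothetical stand-ins for the kernel's X86_PF_* and VM_* bits:

#include <stdio.h>

#define PF_WRITE  (1u << 1)
#define PF_SHSTK  (1u << 6)
#define VMA_WRITE (1u << 0)
#define VMA_SHSTK (1u << 1)

/* returns nonzero when the access must be rejected */
static int access_error(unsigned int error_code, unsigned int vm_flags)
{
    if (error_code & PF_SHSTK) {
        /* shadow stack accesses only hit writable shadow stack VMAs */
        return !(vm_flags & VMA_SHSTK) || !(vm_flags & VMA_WRITE);
    }
    if (error_code & PF_WRITE) {
        /* ordinary writes must never target a shadow stack VMA */
        return (vm_flags & VMA_SHSTK) || !(vm_flags & VMA_WRITE);
    }
    return 0;
}

int main(void)
{
    /* shadow stack access to a plain writable VMA: rejected */
    printf("%d\n", access_error(PF_SHSTK, VMA_WRITE));             /* 1 */
    /* regular write into a shadow stack VMA: rejected as well */
    printf("%d\n", access_error(PF_WRITE, VMA_SHSTK | VMA_WRITE)); /* 1 */
    /* shadow stack access to a shadow stack VMA: allowed */
    printf("%d\n", access_error(PF_SHSTK, VMA_SHSTK | VMA_WRITE)); /* 0 */
    return 0;
}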
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 7159cf787613..fc627acfe40e 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2073,12 +2073,12 @@ int set_memory_nx(unsigned long addr, int numpages)
int set_memory_ro(unsigned long addr, int numpages)
{
- return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
+ return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW | _PAGE_DIRTY), 0);
}
int set_memory_rox(unsigned long addr, int numpages)
{
- pgprot_t clr = __pgprot(_PAGE_RW);
+ pgprot_t clr = __pgprot(_PAGE_RW | _PAGE_DIRTY);
if (__supported_pte_mask & _PAGE_NX)
clr.pgprot |= _PAGE_NX;
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index e4f499eb0f29..afab0bc7862b 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -880,3 +880,41 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
#endif /* CONFIG_X86_64 */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+
+pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_SHADOW_STACK)
+ return pte_mkwrite_shstk(pte);
+
+ pte = pte_mkwrite_kernel(pte);
+
+ if (pte_dirty(pte))
+ pte = pte_clear_saveddirty(pte);
+
+ return pte;
+}
+
+pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_SHADOW_STACK)
+ return pmd_mkwrite_shstk(pmd);
+
+ pmd = pmd_set_flags(pmd, _PAGE_RW);
+
+ if (pmd_dirty(pmd))
+ pmd = pmd_clear_saveddirty(pmd);
+
+ return pmd;
+}
+
+void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte)
+{
+ VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
+ pte_shstk(pte));
+}
+
+void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd)
+{
+ VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
+ pmd_shstk(pmd));
+}
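A rough userspace model of the pte_mkwrite() dispatch added above (the bit values are hypothetical and the SavedDirty handling is simplified): shadow stack VMAs get the Write=0, Dirty=1 encoding, while ordinary VMAs become RW and fold any software SavedDirty bit back into the hardware Dirty bit:

#include <stdio.h>

/* hypothetical bit layout; the real _PAGE_* values differ */
#define PTE_RW          (1u << 1)
#define PTE_DIRTY       (1u << 6)
#define PTE_SAVED_DIRTY (1u << 11) /* software bit */
#define VMA_SHSTK       (1u << 0)

static unsigned int pte_mkwrite(unsigned int pte, unsigned int vm_flags)
{
    /* shadow stack PTEs use the Write=0, Dirty=1 encoding */
    if (vm_flags & VMA_SHSTK)
        return (pte & ~PTE_RW) | PTE_DIRTY;

    pte |= PTE_RW;
    /* once writable, Dirty=1 is unambiguous again, so SavedDirty
     * can be folded back into the hardware Dirty bit */
    if (pte & PTE_SAVED_DIRTY)
        pte = (pte & ~PTE_SAVED_DIRTY) | PTE_DIRTY;
    return pte;
}

int main(void)
{
    printf("%#x\n", pte_mkwrite(0, VMA_SHSTK));       /* 0x40: Dirty only */
    printf("%#x\n", pte_mkwrite(PTE_SAVED_DIRTY, 0)); /* 0x42: RW|Dirty */
    return 0;
}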
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 093b78c8bbec..5a034a994682 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -640,7 +640,7 @@ static struct trap_array_entry trap_array[] = {
TRAP_ENTRY(exc_coprocessor_error, false ),
TRAP_ENTRY(exc_alignment_check, false ),
TRAP_ENTRY(exc_simd_coprocessor_error, false ),
-#ifdef CONFIG_X86_KERNEL_IBT
+#ifdef CONFIG_X86_CET
TRAP_ENTRY(exc_control_protection, false ),
#endif
};
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index b3b8d289b9ab..fdc91deece7e 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -150,7 +150,7 @@ void make_lowmem_page_readwrite(void *vaddr)
if (pte == NULL)
return; /* vaddr missing */
- ptev = pte_mkwrite(*pte);
+ ptev = pte_mkwrite_kernel(*pte);
if (HYPERVISOR_update_va_mapping(address, ptev, 0))
BUG();
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 08f1ceb9eb81..9e5e68008785 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -148,7 +148,7 @@ xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
-#ifdef CONFIG_X86_KERNEL_IBT
+#ifdef CONFIG_X86_CET
xen_pv_trap asm_exc_control_protection
#endif
#ifdef CONFIG_X86_MCE
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index fc7a14884c6c..d72632d9c53c 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -262,7 +262,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)
{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{ pte_val(pte) |= _PAGE_WRITABLE; return pte; }
#define pgprot_noncached(prot) \
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 33a8366e22a5..0db2021f7477 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -4,16 +4,23 @@
# subsystems should select the appropriate symbols.
config REGMAP
+ bool "Register Map support" if KUNIT_ALL_TESTS
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO || REGMAP_FSI)
select IRQ_DOMAIN if REGMAP_IRQ
select MDIO_BUS if REGMAP_MDIO
- bool
+ help
+ Enable support for the Register Map (regmap) access API.
+
+ Usually, this option is automatically selected when needed.
+ However, you may want to enable it manually to run the regmap
+ KUnit tests.
+
+ If unsure, say N.
config REGMAP_KUNIT
tristate "KUnit tests for regmap"
- depends on KUNIT
+ depends on KUNIT && REGMAP
default KUNIT_ALL_TESTS
- select REGMAP
select REGMAP_RAM
config REGMAP_AC97
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index b99bb2369fff..362926d155a4 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -116,8 +116,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
for (i = 0; i < d->chip->num_regs; i++) {
if (d->mask_base) {
if (d->chip->handle_mask_sync)
- d->chip->handle_mask_sync(d->map, i,
- d->mask_buf_def[i],
+ d->chip->handle_mask_sync(i, d->mask_buf_def[i],
d->mask_buf[i],
d->chip->irq_drv_data);
else {
@@ -920,7 +919,7 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
if (d->mask_base) {
if (chip->handle_mask_sync) {
- ret = chip->handle_mask_sync(d->map, i,
+ ret = chip->handle_mask_sync(i,
d->mask_buf_def[i],
d->mask_buf[i],
chip->irq_drv_data);
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index c10a4aa97373..cd48033b804a 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -571,6 +571,10 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
+ /* Give back zero bytes, as the TPM chip has not yet fully resumed: */
+ if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
+ return 0;
+
return tpm_get_random(chip, data, max);
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 4463d0018290..586ca10b0d72 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -412,6 +412,8 @@ int tpm_pm_suspend(struct device *dev)
}
suspended:
+ chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
+
if (rc)
dev_err(dev, "Ignoring error %d while suspending\n", rc);
return 0;
@@ -429,6 +431,14 @@ int tpm_pm_resume(struct device *dev)
if (chip == NULL)
return -ENODEV;
+ chip->flags &= ~TPM_CHIP_FLAG_SUSPENDED;
+
+ /*
+ * Guarantee that SUSPENDED is written last, so that hwrng does not
+ * activate before the chip has been fully resumed.
+ */
+ wmb();
+
return 0;
}
EXPORT_SYMBOL_GPL(tpm_pm_resume);
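The barrier's intent is easiest to see in a compact C11 sketch (atomic_bool here stands in for the TPM_CHIP_FLAG_SUSPENDED chip flag; this illustrates the ordering, it is not the kernel code): the hwrng path reads the flag locklessly, so resume must finish restoring chip state before the flag clear becomes visible:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool suspended;

static void tpm_suspend(void)
{
    atomic_store_explicit(&suspended, true, memory_order_release);
    /* ... quiesce the chip ... */
}

static void tpm_resume(void)
{
    /* ... restore chip state first ... */
    atomic_store_explicit(&suspended, false, memory_order_release);
}

static size_t hwrng_read(size_t max)
{
    /* give back zero bytes while the chip is still suspended */
    if (atomic_load_explicit(&suspended, memory_order_acquire))
        return 0;
    /* ... read random bytes from the chip ... */
    return max;
}

int main(void)
{
    tpm_suspend();
    printf("%zu\n", hwrng_read(32)); /* 0: chip suspended */
    tpm_resume();
    printf("%zu\n", hwrng_read(32)); /* 32 */
    return 0;
}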
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 02945d53fcef..558144fa707a 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -1209,25 +1209,20 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
u32 intmask;
int rc;
- if (chip->ops->clk_enable != NULL)
- chip->ops->clk_enable(chip, true);
-
- /* reenable interrupts that device may have lost or
- * BIOS/firmware may have disabled
+ /*
+ * Re-enable interrupts that device may have lost or BIOS/firmware may
+ * have disabled.
*/
rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq);
- if (rc < 0)
- goto out;
+ if (rc < 0) {
+ dev_err(&chip->dev, "Setting IRQ failed.\n");
+ return;
+ }
intmask = priv->int_mask | TPM_GLOBAL_INT_ENABLE;
-
- tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
-
-out:
- if (chip->ops->clk_enable != NULL)
- chip->ops->clk_enable(chip, false);
-
- return;
+ rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+ if (rc < 0)
+ dev_err(&chip->dev, "Enabling interrupts failed.\n");
}
int tpm_tis_resume(struct device *dev)
@@ -1235,27 +1230,27 @@ int tpm_tis_resume(struct device *dev)
struct tpm_chip *chip = dev_get_drvdata(dev);
int ret;
- ret = tpm_tis_request_locality(chip, 0);
- if (ret < 0)
+ ret = tpm_chip_start(chip);
+ if (ret)
return ret;
if (chip->flags & TPM_CHIP_FLAG_IRQ)
tpm_tis_reenable_interrupts(chip);
- ret = tpm_pm_resume(dev);
- if (ret)
- goto out;
-
/*
* TPM 1.2 requires self-test on resume. This function actually returns
* an error code but for unknown reason it isn't handled.
*/
if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
tpm1_do_selftest(chip);
-out:
- tpm_tis_relinquish_locality(chip, 0);
- return ret;
+ tpm_chip_stop(chip);
+
+ ret = tpm_pm_resume(dev);
+ if (ret)
+ return ret;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(tpm_tis_resume);
#endif
diff --git a/drivers/clk/microchip/Kconfig b/drivers/clk/microchip/Kconfig
index e33e51978938..0724ce65898f 100644
--- a/drivers/clk/microchip/Kconfig
+++ b/drivers/clk/microchip/Kconfig
@@ -5,8 +5,8 @@ config COMMON_CLK_PIC32
config MCHP_CLK_MPFS
bool "Clk driver for PolarFire SoC"
- depends on SOC_MICROCHIP_POLARFIRE || COMPILE_TEST
- default SOC_MICROCHIP_POLARFIRE
+ depends on ARCH_MICROCHIP_POLARFIRE || COMPILE_TEST
+ default ARCH_MICROCHIP_POLARFIRE
select AUXILIARY_BUS
help
Supports Clock Configuration for PolarFire SoC
diff --git a/drivers/clk/microchip/clk-pic32mzda.c b/drivers/clk/microchip/clk-pic32mzda.c
index b72c76f9ecd1..eabfc4931fe9 100644
--- a/drivers/clk/microchip/clk-pic32mzda.c
+++ b/drivers/clk/microchip/clk-pic32mzda.c
@@ -184,7 +184,7 @@ static int pic32mzda_clk_probe(struct platform_device *pdev)
clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL,
0, 24000000);
/* fixed rate (optional) clock */
- if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) {
+ if (of_property_read_bool(np, "microchip,pic32mzda-sosc")) {
pr_info("pic32-clk: dt requests SOSC.\n");
clks[SOSCCLK] = pic32_sosc_clk_register(&sosc_clk, core);
}
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
index 4228be917038..3000816f8f95 100644
--- a/drivers/counter/Kconfig
+++ b/drivers/counter/Kconfig
@@ -21,9 +21,9 @@ config 104_QUAD_8
encoder counter/interface device family (104-QUAD-8, 104-QUAD-4).
A counter's respective error flag may be cleared by performing a write
- operation on the respective count value attribute. Although the
- 104-QUAD-8 counters have a 25-bit range, only the lower 24 bits may be
- set, either directly or via the counter's preset attribute.
+ operation on the respective count value attribute. The 104-QUAD-8
+ counters may be set either directly or via the counter's preset
+ attribute.
The base port addresses for the devices may be configured via the base
array module parameter. The interrupt line numbers for the devices may
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index 9bf20a5d6bda..6206d2dc3d47 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -342,6 +342,9 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
+ /* Reset input selector to its default input */
+ regmap_write(priv->regmap, TIM_TISEL, 0x0);
+
/* Register Counter device */
ret = devm_counter_add(dev, counter);
if (ret < 0)
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 338cf6cc6596..14aa8281c7f4 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -85,6 +85,8 @@ static const struct of_device_id allowlist[] __initconst = {
{ .compatible = "st-ericsson,u9500", },
{ .compatible = "st-ericsson,u9540", },
+ { .compatible = "starfive,jh7110", },
+
{ .compatible = "ti,omap2", },
{ .compatible = "ti,omap4", },
{ .compatible = "ti,omap5", },
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index ee7059399e9c..b7e488c1b5df 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -41,7 +41,7 @@ struct dma_heap_attachment {
bool mapped;
};
-#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_RETRY_MAYFAIL)
#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
| __GFP_NORETRY) & ~__GFP_RECLAIM) \
| __GFP_COMP)
@@ -350,6 +350,9 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
struct page *page, *tmp_page;
int i, ret = -ENOMEM;
+ if (len / PAGE_SIZE > totalram_pages())
+ return ERR_PTR(-ENOMEM);
+
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 290186e44e6b..0ef1971d22bb 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -185,6 +185,7 @@ config EXTCON_USBC_TUSB320
tristate "TI TUSB320 USB-C extcon support"
depends on I2C && TYPEC
select REGMAP_I2C
+ depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
help
Say Y here to enable support for USB Type C cable detection extcon
support using a TUSB320.
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 180be768c215..a703a8315634 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -393,7 +393,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
adev = acpi_dev_get_first_match_dev("INT3496", NULL, -1);
if (adev) {
info->id_extcon = extcon_get_extcon_dev(acpi_dev_name(adev));
- put_device(&adev->dev);
+ acpi_dev_put(adev);
if (IS_ERR(info->id_extcon))
return PTR_ERR(info->id_extcon);
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 32f8b541770b..d339b8680445 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -18,7 +18,6 @@
#include <linux/mfd/palmas.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/workqueue.h>
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index eb02cb962b5e..f72e90ceca53 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -123,7 +123,7 @@ static int qcom_usb_extcon_probe(struct platform_device *pdev)
if (ret)
return ret;
- info->id_irq = platform_get_irq_byname(pdev, "usb_id");
+ info->id_irq = platform_get_irq_byname_optional(pdev, "usb_id");
if (info->id_irq > 0) {
ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
qcom_usb_irq_handler,
@@ -136,7 +136,7 @@ static int qcom_usb_extcon_probe(struct platform_device *pdev)
}
}
- info->vbus_irq = platform_get_irq_byname(pdev, "usb_vbus");
+ info->vbus_irq = platform_get_irq_byname_optional(pdev, "usb_vbus");
if (info->vbus_irq > 0) {
ret = devm_request_threaded_irq(dev, info->vbus_irq, NULL,
qcom_usb_irq_handler,
diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c
index b408ce989c22..cc2d0ac7c5f6 100644
--- a/drivers/extcon/extcon-usbc-tusb320.c
+++ b/drivers/extcon/extcon-usbc-tusb320.c
@@ -15,6 +15,8 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/usb/typec.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/role.h>
#define TUSB320_REG8 0x8
#define TUSB320_REG8_CURRENT_MODE_ADVERTISE GENMASK(7, 6)
@@ -26,16 +28,16 @@
#define TUSB320_REG8_CURRENT_MODE_DETECT_MED 0x1
#define TUSB320_REG8_CURRENT_MODE_DETECT_ACC 0x2
#define TUSB320_REG8_CURRENT_MODE_DETECT_HI 0x3
-#define TUSB320_REG8_ACCESSORY_CONNECTED GENMASK(3, 2)
+#define TUSB320_REG8_ACCESSORY_CONNECTED GENMASK(3, 1)
#define TUSB320_REG8_ACCESSORY_CONNECTED_NONE 0x0
#define TUSB320_REG8_ACCESSORY_CONNECTED_AUDIO 0x4
-#define TUSB320_REG8_ACCESSORY_CONNECTED_ACC 0x5
-#define TUSB320_REG8_ACCESSORY_CONNECTED_DEBUG 0x6
+#define TUSB320_REG8_ACCESSORY_CONNECTED_ACHRG 0x5
+#define TUSB320_REG8_ACCESSORY_CONNECTED_DBGDFP 0x6
+#define TUSB320_REG8_ACCESSORY_CONNECTED_DBGUFP 0x7
#define TUSB320_REG8_ACTIVE_CABLE_DETECTION BIT(0)
#define TUSB320_REG9 0x9
-#define TUSB320_REG9_ATTACHED_STATE_SHIFT 6
-#define TUSB320_REG9_ATTACHED_STATE_MASK 0x3
+#define TUSB320_REG9_ATTACHED_STATE GENMASK(7, 6)
#define TUSB320_REG9_CABLE_DIRECTION BIT(5)
#define TUSB320_REG9_INTERRUPT_STATUS BIT(4)
@@ -78,6 +80,8 @@ struct tusb320_priv {
struct typec_capability cap;
enum typec_port_type port_type;
enum typec_pwr_opmode pwr_opmode;
+ struct fwnode_handle *connector_fwnode;
+ struct usb_role_switch *role_sw;
};
static const char * const tusb_attached_states[] = {
@@ -249,8 +253,7 @@ static void tusb320_extcon_irq_handler(struct tusb320_priv *priv, u8 reg)
{
int state, polarity;
- state = (reg >> TUSB320_REG9_ATTACHED_STATE_SHIFT) &
- TUSB320_REG9_ATTACHED_STATE_MASK;
+ state = FIELD_GET(TUSB320_REG9_ATTACHED_STATE, reg);
polarity = !!(reg & TUSB320_REG9_CABLE_DIRECTION);
dev_dbg(priv->dev, "attached state: %s, polarity: %d\n",
@@ -276,32 +279,86 @@ static void tusb320_typec_irq_handler(struct tusb320_priv *priv, u8 reg9)
{
struct typec_port *port = priv->port;
struct device *dev = priv->dev;
- u8 mode, role, state;
+ int typec_mode;
+ enum usb_role usb_role;
+ enum typec_role pwr_role;
+ enum typec_data_role data_role;
+ u8 state, mode, accessory;
int ret, reg8;
bool ori;
+ ret = regmap_read(priv->regmap, TUSB320_REG8, &reg8);
+ if (ret) {
+ dev_err(dev, "error during reg8 i2c read, ret=%d!\n", ret);
+ return;
+ }
+
ori = reg9 & TUSB320_REG9_CABLE_DIRECTION;
typec_set_orientation(port, ori ? TYPEC_ORIENTATION_REVERSE :
TYPEC_ORIENTATION_NORMAL);
- state = (reg9 >> TUSB320_REG9_ATTACHED_STATE_SHIFT) &
- TUSB320_REG9_ATTACHED_STATE_MASK;
- if (state == TUSB320_ATTACHED_STATE_DFP)
- role = TYPEC_SOURCE;
- else
- role = TYPEC_SINK;
+ state = FIELD_GET(TUSB320_REG9_ATTACHED_STATE, reg9);
+ accessory = FIELD_GET(TUSB320_REG8_ACCESSORY_CONNECTED, reg8);
+
+ switch (state) {
+ case TUSB320_ATTACHED_STATE_DFP:
+ typec_mode = TYPEC_MODE_USB2;
+ usb_role = USB_ROLE_HOST;
+ pwr_role = TYPEC_SOURCE;
+ data_role = TYPEC_HOST;
+ break;
+ case TUSB320_ATTACHED_STATE_UFP:
+ typec_mode = TYPEC_MODE_USB2;
+ usb_role = USB_ROLE_DEVICE;
+ pwr_role = TYPEC_SINK;
+ data_role = TYPEC_DEVICE;
+ break;
+ case TUSB320_ATTACHED_STATE_ACC:
+ /*
+ * Accessory detected. For debug accessories, just make some
+ * qualified guesses as to the role for lack of a better option.
+ */
+ if (accessory == TUSB320_REG8_ACCESSORY_CONNECTED_AUDIO ||
+ accessory == TUSB320_REG8_ACCESSORY_CONNECTED_ACHRG) {
+ typec_mode = TYPEC_MODE_AUDIO;
+ usb_role = USB_ROLE_NONE;
+ pwr_role = TYPEC_SINK;
+ data_role = TYPEC_DEVICE;
+ break;
+ } else if (accessory ==
+ TUSB320_REG8_ACCESSORY_CONNECTED_DBGDFP) {
+ typec_mode = TYPEC_MODE_DEBUG;
+ pwr_role = TYPEC_SOURCE;
+ usb_role = USB_ROLE_HOST;
+ data_role = TYPEC_HOST;
+ break;
+ } else if (accessory ==
+ TUSB320_REG8_ACCESSORY_CONNECTED_DBGUFP) {
+ typec_mode = TYPEC_MODE_DEBUG;
+ pwr_role = TYPEC_SINK;
+ usb_role = USB_ROLE_DEVICE;
+ data_role = TYPEC_DEVICE;
+ break;
+ }
- typec_set_vconn_role(port, role);
- typec_set_pwr_role(port, role);
- typec_set_data_role(port, role == TYPEC_SOURCE ?
- TYPEC_HOST : TYPEC_DEVICE);
+ dev_warn(priv->dev, "unexpected ACCESSORY_CONNECTED state %d\n",
+ accessory);
- ret = regmap_read(priv->regmap, TUSB320_REG8, &reg8);
- if (ret) {
- dev_err(dev, "error during reg8 i2c read, ret=%d!\n", ret);
- return;
+ fallthrough;
+ default:
+ typec_mode = TYPEC_MODE_USB2;
+ usb_role = USB_ROLE_NONE;
+ pwr_role = TYPEC_SINK;
+ data_role = TYPEC_DEVICE;
+ break;
}
+ typec_set_vconn_role(port, pwr_role);
+ typec_set_pwr_role(port, pwr_role);
+ typec_set_data_role(port, data_role);
+ typec_set_mode(port, typec_mode);
+ usb_role_switch_set_role(priv->role_sw, usb_role);
+
mode = FIELD_GET(TUSB320_REG8_CURRENT_MODE_DETECT, reg8);
if (mode == TUSB320_REG8_CURRENT_MODE_DETECT_DEF)
typec_set_pwr_opmode(port, TYPEC_PWR_MODE_USB);
@@ -391,27 +448,25 @@ static int tusb320_typec_probe(struct i2c_client *client,
/* Type-C connector found. */
ret = typec_get_fw_cap(&priv->cap, connector);
if (ret)
- return ret;
+ goto err_put;
priv->port_type = priv->cap.type;
/* This goes into register 0x8 field CURRENT_MODE_ADVERTISE */
ret = fwnode_property_read_string(connector, "typec-power-opmode", &cap_str);
if (ret)
- return ret;
+ goto err_put;
ret = typec_find_pwr_opmode(cap_str);
if (ret < 0)
- return ret;
- if (ret == TYPEC_PWR_MODE_PD)
- return -EINVAL;
+ goto err_put;
priv->pwr_opmode = ret;
/* Initialize the hardware with the devicetree settings. */
ret = tusb320_set_adv_pwr_mode(priv);
if (ret)
- return ret;
+ goto err_put;
priv->cap.revision = USB_TYPEC_REV_1_1;
priv->cap.accessory[0] = TYPEC_ACCESSORY_AUDIO;
@@ -422,10 +477,36 @@ static int tusb320_typec_probe(struct i2c_client *client,
priv->cap.fwnode = connector;
priv->port = typec_register_port(&client->dev, &priv->cap);
- if (IS_ERR(priv->port))
- return PTR_ERR(priv->port);
+ if (IS_ERR(priv->port)) {
+ ret = PTR_ERR(priv->port);
+ goto err_put;
+ }
+
+ /* Find any optional USB role switch to report role changes to */
+ priv->role_sw = fwnode_usb_role_switch_get(connector);
+ if (IS_ERR(priv->role_sw)) {
+ ret = PTR_ERR(priv->role_sw);
+ goto err_unreg;
+ }
+
+ priv->connector_fwnode = connector;
return 0;
+
+err_unreg:
+ typec_unregister_port(priv->port);
+
+err_put:
+ fwnode_handle_put(connector);
+
+ return ret;
+}
+
+static void tusb320_typec_remove(struct tusb320_priv *priv)
+{
+ usb_role_switch_put(priv->role_sw);
+ typec_unregister_port(priv->port);
+ fwnode_handle_put(priv->connector_fwnode);
}
static int tusb320_probe(struct i2c_client *client)
@@ -438,7 +519,9 @@ static int tusb320_probe(struct i2c_client *client)
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+
priv->dev = &client->dev;
+ i2c_set_clientdata(client, priv);
priv->regmap = devm_regmap_init_i2c(client, &tusb320_regmap_config);
if (IS_ERR(priv->regmap))
@@ -489,10 +572,19 @@ static int tusb320_probe(struct i2c_client *client)
tusb320_irq_handler,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->name, priv);
+ if (ret)
+ tusb320_typec_remove(priv);
return ret;
}
+static void tusb320_remove(struct i2c_client *client)
+{
+ struct tusb320_priv *priv = i2c_get_clientdata(client);
+
+ tusb320_typec_remove(priv);
+}
+
static const struct of_device_id tusb320_extcon_dt_match[] = {
{ .compatible = "ti,tusb320", .data = &tusb320_ops, },
{ .compatible = "ti,tusb320l", .data = &tusb320l_ops, },
@@ -502,6 +594,7 @@ MODULE_DEVICE_TABLE(of, tusb320_extcon_dt_match);
static struct i2c_driver tusb320_extcon_driver = {
.probe_new = tusb320_probe,
+ .remove = tusb320_remove,
.driver = {
.name = "extcon-tusb320",
.of_match_table = tusb320_extcon_dt_match,
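The REG9 conversion above trades an explicit shift/mask pair for GENMASK()/FIELD_GET(). A small userspace sketch with simplified stand-ins for those helpers (contiguous masks only):

#include <stdio.h>

/* simplified stand-ins for the kernel's GENMASK()/FIELD_GET() helpers */
#define GENMASK(h, l)        (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_GET(mask, reg) (((reg) & (mask)) / ((mask) & -(mask)))

#define REG9_ATTACHED_STATE  GENMASK(7, 6)
#define REG9_CABLE_DIRECTION (1u << 5)

int main(void)
{
    unsigned int reg9 = 0xA0; /* state = 0b10, direction bit set */

    printf("state=%u dir=%u\n",
           FIELD_GET(REG9_ATTACHED_STATE, reg9),
           !!(reg9 & REG9_CABLE_DIRECTION)); /* state=2 dir=1 */
    return 0;
}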
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index d43ba8e7260d..6f7a60d2ed91 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/idr.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/fs.h>
@@ -206,6 +207,14 @@ static const struct __extcon_info {
* @attr_name: "name" sysfs entry
* @attr_state: "state" sysfs entry
* @attrs: the array pointing to attr_name and attr_state for attr_g
+ * @usb_propval: the array of USB connector properties
+ * @chg_propval: the array of charger connector properties
+ * @jack_propval: the array of jack connector properties
+ * @disp_propval: the array of display connector properties
+ * @usb_bits: the bit array of the USB connector property capabilities
+ * @chg_bits: the bit array of the charger connector property capabilities
+ * @jack_bits: the bit array of the jack connector property capabilities
+ * @disp_bits: the bit array of the display connector property capabilities
*/
struct extcon_cable {
struct extcon_dev *edev;
@@ -222,20 +231,21 @@ struct extcon_cable {
union extcon_property_value jack_propval[EXTCON_PROP_JACK_CNT];
union extcon_property_value disp_propval[EXTCON_PROP_DISP_CNT];
- unsigned long usb_bits[BITS_TO_LONGS(EXTCON_PROP_USB_CNT)];
- unsigned long chg_bits[BITS_TO_LONGS(EXTCON_PROP_CHG_CNT)];
- unsigned long jack_bits[BITS_TO_LONGS(EXTCON_PROP_JACK_CNT)];
- unsigned long disp_bits[BITS_TO_LONGS(EXTCON_PROP_DISP_CNT)];
+ DECLARE_BITMAP(usb_bits, EXTCON_PROP_USB_CNT);
+ DECLARE_BITMAP(chg_bits, EXTCON_PROP_CHG_CNT);
+ DECLARE_BITMAP(jack_bits, EXTCON_PROP_JACK_CNT);
+ DECLARE_BITMAP(disp_bits, EXTCON_PROP_DISP_CNT);
};
static struct class *extcon_class;
+static DEFINE_IDA(extcon_dev_ids);
static LIST_HEAD(extcon_dev_list);
static DEFINE_MUTEX(extcon_dev_list_lock);
static int check_mutually_exclusive(struct extcon_dev *edev, u32 new_state)
{
- int i = 0;
+ int i;
if (!edev->mutually_exclusive)
return 0;
@@ -362,12 +372,12 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
struct extcon_dev *edev = dev_get_drvdata(dev);
if (edev->max_supported == 0)
- return sprintf(buf, "%u\n", edev->state);
+ return sysfs_emit(buf, "%u\n", edev->state);
for (i = 0; i < edev->max_supported; i++) {
- count += sprintf(buf + count, "%s=%d\n",
- extcon_info[edev->supported_cable[i]].name,
- !!(edev->state & BIT(i)));
+ count += sysfs_emit_at(buf, count, "%s=%d\n",
+ extcon_info[edev->supported_cable[i]].name,
+ !!(edev->state & BIT(i)));
}
return count;
@@ -379,7 +389,7 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
{
struct extcon_dev *edev = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", edev->name);
+ return sysfs_emit(buf, "%s\n", edev->name);
}
static DEVICE_ATTR_RO(name);
@@ -390,8 +400,8 @@ static ssize_t cable_name_show(struct device *dev,
attr_name);
int i = cable->cable_index;
- return sprintf(buf, "%s\n",
- extcon_info[cable->edev->supported_cable[i]].name);
+ return sysfs_emit(buf, "%s\n",
+ extcon_info[cable->edev->supported_cable[i]].name);
}
static ssize_t cable_state_show(struct device *dev,
@@ -402,8 +412,8 @@ static ssize_t cable_state_show(struct device *dev,
int i = cable->cable_index;
- return sprintf(buf, "%d\n",
- extcon_get_state(cable->edev, cable->edev->supported_cable[i]));
+ return sysfs_emit(buf, "%d\n",
+ extcon_get_state(cable->edev, cable->edev->supported_cable[i]));
}
/**
@@ -1012,12 +1022,13 @@ ATTRIBUTE_GROUPS(extcon);
static int create_extcon_class(void)
{
- if (!extcon_class) {
- extcon_class = class_create("extcon");
- if (IS_ERR(extcon_class))
- return PTR_ERR(extcon_class);
- extcon_class->dev_groups = extcon_groups;
- }
+ if (extcon_class)
+ return 0;
+
+ extcon_class = class_create("extcon");
+ if (IS_ERR(extcon_class))
+ return PTR_ERR(extcon_class);
+ extcon_class->dev_groups = extcon_groups;
return 0;
}
@@ -1070,6 +1081,156 @@ void extcon_dev_free(struct extcon_dev *edev)
EXPORT_SYMBOL_GPL(extcon_dev_free);
/**
+ * extcon_alloc_cables() - allocate the cables for an extcon device
+ * @edev: extcon device which has cables
+ *
+ * Returns 0 on success or an error number on failure.
+ */
+static int extcon_alloc_cables(struct extcon_dev *edev)
+{
+ int index;
+ char *str;
+ struct extcon_cable *cable;
+
+ if (!edev)
+ return -EINVAL;
+
+ if (!edev->max_supported)
+ return 0;
+
+ edev->cables = kcalloc(edev->max_supported, sizeof(*edev->cables),
+ GFP_KERNEL);
+ if (!edev->cables)
+ return -ENOMEM;
+
+ for (index = 0; index < edev->max_supported; index++) {
+ cable = &edev->cables[index];
+
+ str = kasprintf(GFP_KERNEL, "cable.%d", index);
+ if (!str) {
+ for (index--; index >= 0; index--) {
+ cable = &edev->cables[index];
+ kfree(cable->attr_g.name);
+ }
+
+ kfree(edev->cables);
+ return -ENOMEM;
+ }
+
+ cable->edev = edev;
+ cable->cable_index = index;
+ cable->attrs[0] = &cable->attr_name.attr;
+ cable->attrs[1] = &cable->attr_state.attr;
+ cable->attrs[2] = NULL;
+ cable->attr_g.name = str;
+ cable->attr_g.attrs = cable->attrs;
+
+ sysfs_attr_init(&cable->attr_name.attr);
+ cable->attr_name.attr.name = "name";
+ cable->attr_name.attr.mode = 0444;
+ cable->attr_name.show = cable_name_show;
+
+ sysfs_attr_init(&cable->attr_state.attr);
+ cable->attr_state.attr.name = "state";
+ cable->attr_state.attr.mode = 0444;
+ cable->attr_state.show = cable_state_show;
+ }
+
+ return 0;
+}
+
+/**
+ * extcon_alloc_muex() - allocate the mutually exclusive attributes for an extcon device
+ * @edev: extcon device
+ *
+ * Returns 0 on success or an error number on failure.
+ */
+static int extcon_alloc_muex(struct extcon_dev *edev)
+{
+ char *name;
+ int index;
+
+ if (!edev)
+ return -EINVAL;
+
+ if (!(edev->max_supported && edev->mutually_exclusive))
+ return 0;
+
+ /* Count the size of mutually_exclusive array */
+ for (index = 0; edev->mutually_exclusive[index]; index++)
+ ;
+
+ edev->attrs_muex = kcalloc(index + 1, sizeof(*edev->attrs_muex),
+ GFP_KERNEL);
+ if (!edev->attrs_muex)
+ return -ENOMEM;
+
+ edev->d_attrs_muex = kcalloc(index, sizeof(*edev->d_attrs_muex),
+ GFP_KERNEL);
+ if (!edev->d_attrs_muex) {
+ kfree(edev->attrs_muex);
+ return -ENOMEM;
+ }
+
+ for (index = 0; edev->mutually_exclusive[index]; index++) {
+ name = kasprintf(GFP_KERNEL, "0x%x",
+ edev->mutually_exclusive[index]);
+ if (!name) {
+ for (index--; index >= 0; index--)
+ kfree(edev->d_attrs_muex[index].attr.name);
+
+ kfree(edev->d_attrs_muex);
+ kfree(edev->attrs_muex);
+ return -ENOMEM;
+ }
+ sysfs_attr_init(&edev->d_attrs_muex[index].attr);
+ edev->d_attrs_muex[index].attr.name = name;
+ edev->d_attrs_muex[index].attr.mode = 0000;
+ edev->attrs_muex[index] = &edev->d_attrs_muex[index].attr;
+ }
+ edev->attr_g_muex.name = muex_name;
+ edev->attr_g_muex.attrs = edev->attrs_muex;
+
+ return 0;
+}
+
+/**
+ * extcon_alloc_groups() - allocate the attribute groups for an extcon device
+ * @edev: extcon device
+ *
+ * Returns 0 on success or an error number on failure.
+ */
+static int extcon_alloc_groups(struct extcon_dev *edev)
+{
+ int index;
+
+ if (!edev)
+ return -EINVAL;
+
+ if (!edev->max_supported)
+ return 0;
+
+ edev->extcon_dev_type.groups = kcalloc(edev->max_supported + 2,
+ sizeof(*edev->extcon_dev_type.groups),
+ GFP_KERNEL);
+ if (!edev->extcon_dev_type.groups)
+ return -ENOMEM;
+
+ edev->extcon_dev_type.name = dev_name(&edev->dev);
+ edev->extcon_dev_type.release = dummy_sysfs_dev_release;
+
+ for (index = 0; index < edev->max_supported; index++)
+ edev->extcon_dev_type.groups[index] = &edev->cables[index].attr_g;
+
+ if (edev->mutually_exclusive)
+ edev->extcon_dev_type.groups[index] = &edev->attr_g_muex;
+
+ edev->dev.type = &edev->extcon_dev_type;
+
+ return 0;
+}
+
+/**
* extcon_dev_register() - Register a new extcon device
* @edev: the extcon device to be registered
*
@@ -1085,19 +1246,16 @@ EXPORT_SYMBOL_GPL(extcon_dev_free);
*/
int extcon_dev_register(struct extcon_dev *edev)
{
- int ret, index = 0;
- static atomic_t edev_no = ATOMIC_INIT(-1);
+ int ret, index;
- if (!extcon_class) {
- ret = create_extcon_class();
- if (ret < 0)
- return ret;
- }
+ ret = create_extcon_class();
+ if (ret < 0)
+ return ret;
if (!edev || !edev->supported_cable)
return -EINVAL;
- for (; edev->supported_cable[index] != EXTCON_NONE; index++);
+ for (index = 0; edev->supported_cable[index] != EXTCON_NONE; index++);
edev->max_supported = index;
if (index > SUPPORTED_CABLE_MAX) {
@@ -1115,124 +1273,26 @@ int extcon_dev_register(struct extcon_dev *edev)
"extcon device name is null\n");
return -EINVAL;
}
- dev_set_name(&edev->dev, "extcon%lu",
- (unsigned long)atomic_inc_return(&edev_no));
-
- if (edev->max_supported) {
- char *str;
- struct extcon_cable *cable;
-
- edev->cables = kcalloc(edev->max_supported,
- sizeof(struct extcon_cable),
- GFP_KERNEL);
- if (!edev->cables) {
- ret = -ENOMEM;
- goto err_sysfs_alloc;
- }
- for (index = 0; index < edev->max_supported; index++) {
- cable = &edev->cables[index];
-
- str = kasprintf(GFP_KERNEL, "cable.%d", index);
- if (!str) {
- for (index--; index >= 0; index--) {
- cable = &edev->cables[index];
- kfree(cable->attr_g.name);
- }
- ret = -ENOMEM;
-
- goto err_alloc_cables;
- }
-
- cable->edev = edev;
- cable->cable_index = index;
- cable->attrs[0] = &cable->attr_name.attr;
- cable->attrs[1] = &cable->attr_state.attr;
- cable->attrs[2] = NULL;
- cable->attr_g.name = str;
- cable->attr_g.attrs = cable->attrs;
-
- sysfs_attr_init(&cable->attr_name.attr);
- cable->attr_name.attr.name = "name";
- cable->attr_name.attr.mode = 0444;
- cable->attr_name.show = cable_name_show;
-
- sysfs_attr_init(&cable->attr_state.attr);
- cable->attr_state.attr.name = "state";
- cable->attr_state.attr.mode = 0444;
- cable->attr_state.show = cable_state_show;
- }
- }
- if (edev->max_supported && edev->mutually_exclusive) {
- char *name;
-
- /* Count the size of mutually_exclusive array */
- for (index = 0; edev->mutually_exclusive[index]; index++)
- ;
-
- edev->attrs_muex = kcalloc(index + 1,
- sizeof(struct attribute *),
- GFP_KERNEL);
- if (!edev->attrs_muex) {
- ret = -ENOMEM;
- goto err_muex;
- }
-
- edev->d_attrs_muex = kcalloc(index,
- sizeof(struct device_attribute),
- GFP_KERNEL);
- if (!edev->d_attrs_muex) {
- ret = -ENOMEM;
- kfree(edev->attrs_muex);
- goto err_muex;
- }
-
- for (index = 0; edev->mutually_exclusive[index]; index++) {
- name = kasprintf(GFP_KERNEL, "0x%x",
- edev->mutually_exclusive[index]);
- if (!name) {
- for (index--; index >= 0; index--) {
- kfree(edev->d_attrs_muex[index].attr.
- name);
- }
- kfree(edev->d_attrs_muex);
- kfree(edev->attrs_muex);
- ret = -ENOMEM;
- goto err_muex;
- }
- sysfs_attr_init(&edev->d_attrs_muex[index].attr);
- edev->d_attrs_muex[index].attr.name = name;
- edev->d_attrs_muex[index].attr.mode = 0000;
- edev->attrs_muex[index] = &edev->d_attrs_muex[index]
- .attr;
- }
- edev->attr_g_muex.name = muex_name;
- edev->attr_g_muex.attrs = edev->attrs_muex;
+ ret = ida_alloc(&extcon_dev_ids, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
- }
+ edev->id = ret;
- if (edev->max_supported) {
- edev->extcon_dev_type.groups =
- kcalloc(edev->max_supported + 2,
- sizeof(struct attribute_group *),
- GFP_KERNEL);
- if (!edev->extcon_dev_type.groups) {
- ret = -ENOMEM;
- goto err_alloc_groups;
- }
+ dev_set_name(&edev->dev, "extcon%d", edev->id);
- edev->extcon_dev_type.name = dev_name(&edev->dev);
- edev->extcon_dev_type.release = dummy_sysfs_dev_release;
+ ret = extcon_alloc_cables(edev);
+ if (ret < 0)
+ goto err_alloc_cables;
- for (index = 0; index < edev->max_supported; index++)
- edev->extcon_dev_type.groups[index] =
- &edev->cables[index].attr_g;
- if (edev->mutually_exclusive)
- edev->extcon_dev_type.groups[index] =
- &edev->attr_g_muex;
+ ret = extcon_alloc_muex(edev);
+ if (ret < 0)
+ goto err_alloc_muex;
- edev->dev.type = &edev->extcon_dev_type;
- }
+ ret = extcon_alloc_groups(edev);
+ if (ret < 0)
+ goto err_alloc_groups;
spin_lock_init(&edev->lock);
if (edev->max_supported) {
@@ -1277,13 +1337,14 @@ err_alloc_groups:
kfree(edev->d_attrs_muex);
kfree(edev->attrs_muex);
}
-err_muex:
+err_alloc_muex:
for (index = 0; index < edev->max_supported; index++)
kfree(edev->cables[index].attr_g.name);
-err_alloc_cables:
if (edev->max_supported)
kfree(edev->cables);
-err_sysfs_alloc:
+err_alloc_cables:
+ ida_free(&extcon_dev_ids, edev->id);
+
return ret;
}
EXPORT_SYMBOL_GPL(extcon_dev_register);
@@ -1306,12 +1367,13 @@ void extcon_dev_unregister(struct extcon_dev *edev)
list_del(&edev->entry);
mutex_unlock(&extcon_dev_list_lock);
- if (IS_ERR_OR_NULL(get_device(&edev->dev))) {
- dev_err(&edev->dev, "Failed to unregister extcon_dev (%s)\n",
- dev_name(&edev->dev));
+ if (!get_device(&edev->dev)) {
+ dev_err(&edev->dev, "Failed to unregister extcon_dev\n");
return;
}
+ ida_free(&extcon_dev_ids, edev->id);
+
device_unregister(&edev->dev);
if (edev->mutually_exclusive && edev->max_supported) {
@@ -1349,7 +1411,7 @@ struct extcon_dev *extcon_find_edev_by_node(struct device_node *node)
mutex_lock(&extcon_dev_list_lock);
list_for_each_entry(edev, &extcon_dev_list, entry)
- if (edev->dev.parent && edev->dev.parent->of_node == node)
+ if (edev->dev.parent && device_match_of_node(edev->dev.parent, node))
goto out;
edev = ERR_PTR(-EPROBE_DEFER);
out:
@@ -1367,21 +1429,17 @@ out:
*/
struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
{
- struct device_node *node;
+ struct device_node *node, *np = dev_of_node(dev);
struct extcon_dev *edev;
- if (!dev)
- return ERR_PTR(-EINVAL);
-
- if (!dev->of_node) {
+ if (!np) {
dev_dbg(dev, "device does not have a device node entry\n");
return ERR_PTR(-EINVAL);
}
- node = of_parse_phandle(dev->of_node, "extcon", index);
+ node = of_parse_phandle(np, "extcon", index);
if (!node) {
- dev_dbg(dev, "failed to get phandle in %pOF node\n",
- dev->of_node);
+ dev_dbg(dev, "failed to get phandle in %pOF node\n", np);
return ERR_PTR(-ENODEV);
}
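extcon%d names now come from an IDA rather than an ever-growing atomic counter, so an unregister returns its ID for reuse. A tiny userspace model of that recycling behaviour (the bitmap allocator below is an illustration, not the kernel's IDA):

#include <stdio.h>

#define MAX_IDS 64
static unsigned char used[MAX_IDS];

/* hand out the lowest free slot, like ida_alloc() */
static int ida_alloc(void)
{
    for (int i = 0; i < MAX_IDS; i++) {
        if (!used[i]) {
            used[i] = 1;
            return i;
        }
    }
    return -1; /* -ENOSPC in the kernel */
}

/* return a slot for reuse, like ida_free() */
static void ida_free(int id)
{
    used[id] = 0;
}

int main(void)
{
    int a = ida_alloc(), b = ida_alloc();

    printf("extcon%d extcon%d\n", a, b); /* extcon0 extcon1 */
    ida_free(a);
    printf("extcon%d\n", ida_alloc());   /* extcon0 again */
    return 0;
}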
diff --git a/drivers/extcon/extcon.h b/drivers/extcon/extcon.h
index 93b5e0306966..946182687786 100644
--- a/drivers/extcon/extcon.h
+++ b/drivers/extcon/extcon.h
@@ -13,13 +13,14 @@
* are disabled.
* @mutually_exclusive: Array of mutually exclusive set of cables that cannot
* be attached simultaneously. The array should be
- * ending with NULL or be NULL (no mutually exclusive
- * cables). For example, if it is { 0x7, 0x30, 0}, then,
+ * ending with 0 or be NULL (no mutually exclusive cables).
+ * For example, if it is {0x7, 0x30, 0}, then,
* {0, 1}, {0, 1, 2}, {0, 2}, {1, 2}, or {4, 5} cannot
* be attached simultaneously. {0x7, 0} is equivalent to
* {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
* can be no simultaneous connections.
* @dev: Device of this extcon.
+ * @id: Unique device ID of this extcon.
* @state: Attach/detach state of this extcon. Do not provide at
* register-time.
* @nh_all: Notifier for the state change events for all supported
@@ -27,7 +28,7 @@
* @nh: Notifier for the state change events from this extcon
* @entry: To support list of extcon devices so that users can
* search for extcon devices based on the extcon name.
- * @lock:
+ * @lock: Protects device state and serialises device registration
* @max_supported: Internal value to store the number of cables.
* @extcon_dev_type: Device_type struct to provide attribute_groups
* customized for each extcon device.
@@ -46,6 +47,7 @@ struct extcon_dev {
/* Internal data. Please do not set. */
struct device dev;
+ unsigned int id;
struct raw_notifier_head nh_all;
struct raw_notifier_head *nh;
struct list_head entry;
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index f29d77ecf72d..2b8bfcd010f5 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -15,6 +15,8 @@
#include "common.h"
+static DEFINE_IDA(ffa_bus_id);
+
static int ffa_device_match(struct device *dev, struct device_driver *drv)
{
const struct ffa_device_id *id_table;
@@ -53,7 +55,8 @@ static void ffa_device_remove(struct device *dev)
{
struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
- ffa_drv->remove(to_ffa_dev(dev));
+ if (ffa_drv->remove)
+ ffa_drv->remove(to_ffa_dev(dev));
}
static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
@@ -130,6 +133,7 @@ static void ffa_release_device(struct device *dev)
{
struct ffa_device *ffa_dev = to_ffa_dev(dev);
+ ida_free(&ffa_bus_id, ffa_dev->id);
kfree(ffa_dev);
}
@@ -170,18 +174,24 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev)
struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
const struct ffa_ops *ops)
{
- int ret;
+ int id, ret;
struct device *dev;
struct ffa_device *ffa_dev;
+ id = ida_alloc_min(&ffa_bus_id, 1, GFP_KERNEL);
+ if (id < 0)
+ return NULL;
+
ffa_dev = kzalloc(sizeof(*ffa_dev), GFP_KERNEL);
- if (!ffa_dev)
+ if (!ffa_dev) {
+ ida_free(&ffa_bus_id, id);
return NULL;
+ }
dev = &ffa_dev->dev;
dev->bus = &ffa_bus_type;
dev->release = ffa_release_device;
- dev_set_name(&ffa_dev->dev, "arm-ffa-%04x", vm_id);
+ dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
ffa_dev->vm_id = vm_id;
ffa_dev->ops = ops;
@@ -217,4 +227,5 @@ void arm_ffa_bus_exit(void)
{
ffa_devices_unregister();
bus_unregister(&ffa_bus_type);
+ ida_destroy(&ffa_bus_id);
}
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index fa85c64d3ded..e23409138667 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -193,7 +193,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
int idx, count, flags = 0, sz, buf_sz;
ffa_value_t partition_info;
- if (!buffer || !num_partitions) /* Just get the count for now */
+ if (drv_info->version > FFA_VERSION_1_0 &&
+ (!buffer || !num_partitions)) /* Just get the count for now */
flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;
mutex_lock(&drv_info->rx_lock);
@@ -420,12 +421,17 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
ep_mem_access->receiver = args->attrs[idx].receiver;
ep_mem_access->attrs = args->attrs[idx].attrs;
ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs);
+ ep_mem_access->flag = 0;
+ ep_mem_access->reserved = 0;
}
+ mem_region->reserved_0 = 0;
+ mem_region->reserved_1 = 0;
mem_region->ep_count = args->nattrs;
composite = buffer + COMPOSITE_OFFSET(args->nattrs);
composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
composite->addr_range_cnt = num_entries;
+ composite->reserved = 0;
length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries);
frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0);
@@ -460,6 +466,7 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
constituents->address = sg_phys(args->sg);
constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
+ constituents->reserved = 0;
constituents++;
frag_len += sizeof(struct ffa_mem_region_addr_range);
} while ((args->sg = sg_next(args->sg)));
diff --git a/drivers/firmware/arm_scmi/raw_mode.c b/drivers/firmware/arm_scmi/raw_mode.c
index d40df099fd51..6971dcf72fb9 100644
--- a/drivers/firmware/arm_scmi/raw_mode.c
+++ b/drivers/firmware/arm_scmi/raw_mode.c
@@ -1066,7 +1066,7 @@ static int scmi_xfer_raw_worker_init(struct scmi_raw_mode_info *raw)
raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d",
WQ_UNBOUND | WQ_FREEZABLE |
- WQ_HIGHPRI, WQ_SYSFS, raw->id);
+ WQ_HIGHPRI | WQ_SYSFS, 0, raw->id);
if (!raw->wait_wq)
return -ENOMEM;
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index 82c64cb9f531..74363ed7501f 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -51,7 +51,8 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
*
* It's not easily possible to fix this in struct screen_info,
* as this could break UAPI. The best solution is to compute
- * bits_per_pixel here and ignore lfb_depth. In the loop below,
+ * bits_per_pixel from the color bits, reserved bits and
+ * reported lfb_depth, whichever is highest. In the loop below,
* ignore simplefb formats with alpha bits, as EFI and VESA
* don't specify alpha channels.
*/
@@ -60,6 +61,7 @@ __init bool sysfb_parse_mode(const struct screen_info *si,
si->green_size + si->green_pos,
si->blue_size + si->blue_pos),
si->rsvd_size + si->rsvd_pos);
+ bits_per_pixel = max_t(u32, bits_per_pixel, si->lfb_depth);
} else {
bits_per_pixel = si->lfb_depth;
}
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index f2253fd5ab4b..8ff5f4ff5958 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -100,13 +100,23 @@ static const struct regmap_irq dio48e_regmap_irqs[] = {
DIO48E_REGMAP_IRQ(0), DIO48E_REGMAP_IRQ(1),
};
-static int dio48e_handle_mask_sync(struct regmap *const map, const int index,
+/**
+ * struct dio48e_gpio - GPIO device private data structure
+ * @map: Regmap for the device
+ * @irq_mask: Current IRQ mask state on the device
+ */
+struct dio48e_gpio {
+ struct regmap *map;
+ unsigned int irq_mask;
+};
+
+static int dio48e_handle_mask_sync(const int index,
const unsigned int mask_buf_def,
const unsigned int mask_buf,
void *const irq_drv_data)
{
- unsigned int *const irq_mask = irq_drv_data;
- const unsigned int prev_mask = *irq_mask;
+ struct dio48e_gpio *const dio48egpio = irq_drv_data;
+ const unsigned int prev_mask = dio48egpio->irq_mask;
int err;
unsigned int val;
@@ -115,19 +125,19 @@ static int dio48e_handle_mask_sync(struct regmap *const map, const int index,
return 0;
/* remember the current mask for the next mask sync */
- *irq_mask = mask_buf;
+ dio48egpio->irq_mask = mask_buf;
/* if all previously masked, enable interrupts when unmasking */
if (prev_mask == mask_buf_def) {
- err = regmap_write(map, DIO48E_CLEAR_INTERRUPT, 0x00);
+ err = regmap_write(dio48egpio->map, DIO48E_CLEAR_INTERRUPT, 0x00);
if (err)
return err;
- return regmap_write(map, DIO48E_ENABLE_INTERRUPT, 0x00);
+ return regmap_write(dio48egpio->map, DIO48E_ENABLE_INTERRUPT, 0x00);
}
/* if all are currently masked, disable interrupts */
if (mask_buf == mask_buf_def)
- return regmap_read(map, DIO48E_DISABLE_INTERRUPT, &val);
+ return regmap_read(dio48egpio->map, DIO48E_DISABLE_INTERRUPT, &val);
return 0;
}
@@ -168,7 +178,7 @@ static int dio48e_probe(struct device *dev, unsigned int id)
struct regmap *map;
int err;
struct regmap_irq_chip *chip;
- unsigned int irq_mask;
+ struct dio48e_gpio *dio48egpio;
struct regmap_irq_chip_data *chip_data;
if (!devm_request_region(dev, base[id], DIO48E_EXTENT, name)) {
@@ -186,12 +196,14 @@ static int dio48e_probe(struct device *dev, unsigned int id)
return dev_err_probe(dev, PTR_ERR(map),
"Unable to initialize register map\n");
- chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
+ dio48egpio = devm_kzalloc(dev, sizeof(*dio48egpio), GFP_KERNEL);
+ if (!dio48egpio)
return -ENOMEM;
- chip->irq_drv_data = devm_kzalloc(dev, sizeof(irq_mask), GFP_KERNEL);
- if (!chip->irq_drv_data)
+ dio48egpio->map = map;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
return -ENOMEM;
chip->name = name;
@@ -202,6 +214,7 @@ static int dio48e_probe(struct device *dev, unsigned int id)
chip->irqs = dio48e_regmap_irqs;
chip->num_irqs = ARRAY_SIZE(dio48e_regmap_irqs);
chip->handle_mask_sync = dio48e_handle_mask_sync;
+ chip->irq_drv_data = dio48egpio;
/* Initialize to prevent spurious interrupts before we're ready */
err = dio48e_irq_init_hw(map);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 02b827785e39..957b18bda4a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -244,7 +244,6 @@ extern int amdgpu_sg_display;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
-#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 0385f7f69278..a46285841d17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -53,7 +53,6 @@ int amdgpu_amdkfd_init(void)
amdgpu_amdkfd_total_mem_size *= si.mem_unit;
ret = kgd2kfd_init();
- amdgpu_amdkfd_gpuvm_init_mem_limits();
kfd_initialized = !ret;
return ret;
@@ -143,6 +142,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
int i;
int last_valid_bit;
+ amdgpu_amdkfd_gpuvm_init_mem_limits();
+
if (adev->kfd.dev) {
struct kgd2kfd_shared_resources gpu_resources = {
.compute_vmid_bitmap =
@@ -162,7 +163,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
* clear
*/
bitmap_complement(gpu_resources.cp_queue_bitmap,
- adev->gfx.mec.queue_bitmap,
+ adev->gfx.mec_bitmap[0].queue_bitmap,
KGD_MAX_QUEUES);
/* According to linux/bitmap.h we shouldn't use bitmap_clear if
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 9378fc79e9ea..f599e1e74fcc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -288,7 +288,7 @@ static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off)
{
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
struct v10_compute_mqd *m;
uint32_t mec, pipe;
int r;
@@ -303,7 +303,7 @@ static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
mec, pipe, queue_id);
- spin_lock(&adev->gfx.kiq.ring_lock);
+ spin_lock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_ring_alloc(kiq_ring, 7);
if (r) {
pr_err("Failed to alloc KIQ (%d).\n", r);
@@ -330,7 +330,7 @@ static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
amdgpu_ring_commit(kiq_ring);
out_unlock:
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
release_queue(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index ba21ec6b35e0..5c4152ae44da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -275,7 +275,7 @@ static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off)
{
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
struct v10_compute_mqd *m;
uint32_t mec, pipe;
int r;
@@ -290,7 +290,7 @@ static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
mec, pipe, queue_id);
- spin_lock(&adev->gfx.kiq.ring_lock);
+ spin_lock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_ring_alloc(kiq_ring, 7);
if (r) {
pr_err("Failed to alloc KIQ (%d).\n", r);
@@ -317,7 +317,7 @@ static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
amdgpu_ring_commit(kiq_ring);
out_unlock:
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
release_queue(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
index 7e80caa05060..5cdb7289d35b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -260,7 +260,7 @@ static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off)
{
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
struct v11_compute_mqd *m;
uint32_t mec, pipe;
int r;
@@ -275,7 +275,7 @@ static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd,
pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
mec, pipe, queue_id);
- spin_lock(&adev->gfx.kiq.ring_lock);
+ spin_lock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_ring_alloc(kiq_ring, 7);
if (r) {
pr_err("Failed to alloc KIQ (%d).\n", r);
@@ -302,7 +302,7 @@ static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd,
amdgpu_ring_commit(kiq_ring);
out_unlock:
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
release_queue(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index e92b93557c13..ae06d1f2af93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -50,12 +50,12 @@ static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
mutex_lock(&adev->srbm_mutex);
- soc15_grbm_select(adev, mec, pipe, queue, vmid);
+ soc15_grbm_select(adev, mec, pipe, queue, vmid, 0);
}
static void unlock_srbm(struct amdgpu_device *adev)
{
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
@@ -300,7 +300,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t doorbell_off)
{
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
struct v9_mqd *m;
uint32_t mec, pipe;
int r;
@@ -315,7 +315,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
mec, pipe, queue_id);
- spin_lock(&adev->gfx.kiq.ring_lock);
+ spin_lock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_ring_alloc(kiq_ring, 7);
if (r) {
pr_err("Failed to alloc KIQ (%d).\n", r);
@@ -342,7 +342,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
amdgpu_ring_commit(kiq_ring);
out_unlock:
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
release_queue(adev);
return r;
@@ -700,7 +700,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
*wave_cnt = 0;
pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
- soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0);
+ soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, 0);
reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
queue_slot);
*wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
@@ -772,13 +772,13 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
lock_spi_csq_mutexes(adev);
- soc15_grbm_select(adev, 1, 0, 0, 0);
+ soc15_grbm_select(adev, 1, 0, 0, 0, 0);
/*
* Iterate through the shader engines and arrays of the device
* to get number of waves in flight
*/
- bitmap_complement(cp_queue_bitmap, adev->gfx.mec.queue_bitmap,
+ bitmap_complement(cp_queue_bitmap, adev->gfx.mec_bitmap[0].queue_bitmap,
KGD_MAX_QUEUES);
max_queue_cnt = adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe;
@@ -787,7 +787,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
for (se_idx = 0; se_idx < se_cnt; se_idx++) {
for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
- amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff, 0);
queue_map = RREG32_SOC15(GC, 0, mmSPI_CSQ_WF_ACTIVE_STATUS);
/*
@@ -820,8 +820,8 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
}
}
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
unlock_spi_csq_mutexes(adev);
/* Update the output parameters and return */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 83a83ced2439..c3990a5eb7c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -36,6 +36,7 @@
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu_xgmi.h"
#include "kfd_smi_events.h"
+#include <drm/ttm/ttm_tt.h>
/* Userptr restore delay, just long enough to allow consecutive VM
* changes to accumulate
@@ -110,13 +111,16 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
struct sysinfo si;
uint64_t mem;
+ if (kfd_mem_limit.max_system_mem_limit)
+ return;
+
si_meminfo(&si);
mem = si.freeram - si.freehigh;
mem *= si.mem_unit;
spin_lock_init(&kfd_mem_limit.mem_limit_lock);
kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
- kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
+ kfd_mem_limit.max_ttm_mem_limit = ttm_tt_pages_limit() << PAGE_SHIFT;
pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
(kfd_mem_limit.max_system_mem_limit >> 20),
(kfd_mem_limit.max_ttm_mem_limit >> 20));
@@ -2445,7 +2449,9 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
ret = -EAGAIN;
goto unlock_out;
}
- mem->invalid = 0;
+ /* set mem valid if mem has hmm range associated */
+ if (mem->range)
+ mem->invalid = 0;
}
unlock_out:
@@ -2577,8 +2583,15 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
list_for_each_entry_safe(mem, tmp_mem,
&process_info->userptr_inval_list,
validate_list.head) {
- bool valid = amdgpu_ttm_tt_get_user_pages_done(
- mem->bo->tbo.ttm, mem->range);
+ bool valid;
+
+ /* keep mem without an hmm range on the userptr_inval_list */
+ if (!mem->range)
+ continue;
+
+ /* Only check mem with an hmm range associated */
+ valid = amdgpu_ttm_tt_get_user_pages_done(
+ mem->bo->tbo.ttm, mem->range);
mem->range = NULL;
if (!valid) {
@@ -2586,7 +2599,12 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
ret = -EAGAIN;
continue;
}
- WARN(mem->invalid, "Valid BO is marked invalid");
+
+ if (mem->invalid) {
+ WARN(1, "Valid BO is marked invalid");
+ ret = -EAGAIN;
+ continue;
+ }
list_move_tail(&mem->validate_list.head,
&process_info->userptr_valid_list);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 30c28a69e847..b582b83c4984 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -104,9 +104,8 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
adev->bios = NULL;
vram_base = pci_resource_start(adev->pdev, 0);
bios = ioremap_wc(vram_base, size);
- if (!bios) {
+ if (!bios)
return false;
- }
adev->bios = kmalloc(size, GFP_KERNEL);
if (!adev->bios) {
@@ -133,9 +132,8 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
adev->bios = NULL;
/* XXX: some cards may return 0 for rom size? ddx has a workaround */
bios = pci_map_rom(adev->pdev, &size);
- if (!bios) {
+ if (!bios)
return false;
- }
adev->bios = kzalloc(size, GFP_KERNEL);
if (adev->bios == NULL) {
@@ -168,9 +166,9 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
header[AMD_VBIOS_SIGNATURE_END] = 0;
if ((!AMD_IS_VALID_VBIOS(header)) ||
- 0 != memcmp((char *)&header[AMD_VBIOS_SIGNATURE_OFFSET],
- AMD_VBIOS_SIGNATURE,
- strlen(AMD_VBIOS_SIGNATURE)))
+ memcmp((char *)&header[AMD_VBIOS_SIGNATURE_OFFSET],
+ AMD_VBIOS_SIGNATURE,
+ strlen(AMD_VBIOS_SIGNATURE)) != 0)
return false;
/* valid vbios, go on */
@@ -264,7 +262,7 @@ static int amdgpu_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
if (ACPI_FAILURE(status)) {
- printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
+ DRM_ERROR("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
return -ENODEV;
}
@@ -363,7 +361,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
struct acpi_table_header *hdr;
acpi_size tbl_size;
UEFI_ACPI_VFCT *vfct;
- unsigned offset;
+ unsigned int offset;
if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 6be30dcb029d..d34037b85cf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -593,11 +593,20 @@ static int amdgpu_connector_set_property(struct drm_connector *connector,
switch (val) {
default:
- case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
- case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
- case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
- case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+ case DRM_MODE_SCALE_NONE:
+ rmx_type = RMX_OFF;
+ break;
+ case DRM_MODE_SCALE_CENTER:
+ rmx_type = RMX_CENTER;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ rmx_type = RMX_ASPECT;
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ rmx_type = RMX_FULL;
+ break;
}
+
if (amdgpu_encoder->rmx_type == rmx_type)
return 0;
@@ -799,12 +808,21 @@ static int amdgpu_connector_set_lcd_property(struct drm_connector *connector,
}
switch (value) {
- case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
- case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
- case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
+ case DRM_MODE_SCALE_NONE:
+ rmx_type = RMX_OFF;
+ break;
+ case DRM_MODE_SCALE_CENTER:
+ rmx_type = RMX_CENTER;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ rmx_type = RMX_ASPECT;
+ break;
default:
- case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ rmx_type = RMX_FULL;
+ break;
}
+
if (amdgpu_encoder->rmx_type == rmx_type)
return 0;
@@ -1127,7 +1145,8 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
/* assume digital unless load detected otherwise */
amdgpu_connector->use_digital = true;
lret = encoder_funcs->detect(encoder, connector);
- DRM_DEBUG_KMS("load_detect %x returned: %x\n",encoder->encoder_type,lret);
+ DRM_DEBUG_KMS("load_detect %x returned: %x\n",
+ encoder->encoder_type, lret);
if (lret == connector_status_connected)
amdgpu_connector->use_digital = false;
}
@@ -1991,7 +2010,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
if (i2c_bus->valid) {
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ DRM_CONNECTOR_POLL_DISCONNECT;
}
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 2eb2c66843a8..6e1d331af01f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -112,6 +112,9 @@ static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
if (r < 0)
return r;
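+ /* reject submissions that would exceed the per-ring IB limit */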
+ if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type))
+ return -EINVAL;
+
++(num_ibs[r]);
p->gang_leader_idx = r;
return 0;
@@ -192,7 +195,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
uint64_t *chunk_array_user;
uint64_t *chunk_array;
uint32_t uf_offset = 0;
- unsigned int size;
+ size_t size;
int ret;
int i;
@@ -285,6 +288,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
+ case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
break;
default:
@@ -393,7 +397,7 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
{
struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- unsigned num_deps;
+ unsigned int num_deps;
int i, r;
num_deps = chunk->length_dw * 4 /
@@ -464,7 +468,7 @@ static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
- unsigned num_deps;
+ unsigned int num_deps;
int i, r;
num_deps = chunk->length_dw * 4 /
@@ -482,7 +486,7 @@ static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
- unsigned num_deps;
+ unsigned int num_deps;
int i, r;
num_deps = chunk->length_dw * 4 /
@@ -502,7 +506,7 @@ static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
- unsigned num_deps;
+ unsigned int num_deps;
int i;
num_deps = chunk->length_dw * 4 /
@@ -536,7 +540,7 @@ static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
- unsigned num_deps;
+ unsigned int num_deps;
int i;
num_deps = chunk->length_dw * 4 /
@@ -575,6 +579,26 @@ static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
return 0;
}
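+/* validate the CP_GFX_SHADOW chunk flags and copy the shadow, CSA and
+ * GDS virtual addresses into every job of the gang
+ */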
+static int amdgpu_cs_p2_shadow(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata;
+ int i;
+
+ if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW)
+ return -EINVAL;
+
+ for (i = 0; i < p->gang_size; ++i) {
+ p->jobs[i]->shadow_va = shadow->shadow_va;
+ p->jobs[i]->csa_va = shadow->csa_va;
+ p->jobs[i]->gds_va = shadow->gds_va;
+ p->jobs[i]->init_shadow =
+ shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;
+ }
+
+ return 0;
+}
+
static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
{
unsigned int ce_preempt = 0, de_preempt = 0;
@@ -617,6 +641,11 @@ static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
if (r)
return r;
break;
+ case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
+ r = amdgpu_cs_p2_shadow(p, chunk);
+ if (r)
+ return r;
+ break;
}
}
@@ -729,6 +758,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
if (used_vis_vram < total_vis_vram) {
u64 free_vis_vram = total_vis_vram - used_vis_vram;
+
adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
increment_us, us_upper_bound);
@@ -1047,9 +1077,8 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
/* the IB should be reserved at this point */
r = amdgpu_bo_kmap(aobj, (void **)&kptr);
- if (r) {
+ if (r)
return r;
- }
kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
@@ -1356,7 +1385,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
/* Cleanup the parser structure */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
- unsigned i;
+ unsigned int i;
amdgpu_sync_free(&parser->sync);
for (i = 0; i < parser->num_post_deps; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index d2139ac12159..e1f642a3dc2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -576,6 +576,9 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
if (atomic_read(&ctx->guilty))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
+ if (amdgpu_in_reset(adev))
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS;
+
if (adev->ras_enabled && con) {
/* Return the cached values in O(1),
* and schedule delayed work to cache
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index f60753f97ac5..df94cd2c4b39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -56,14 +56,14 @@
*
* Bit 62: Indicates a GRBM bank switch is needed
* Bit 61: Indicates a SRBM bank switch is needed (implies bit 62 is
- * zero)
+ * zero)
* Bits 24..33: The SE or ME selector if needed
* Bits 34..43: The SH (or SA) or PIPE selector if needed
* Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
*
* Bit 23: Indicates that the PM power gating lock should be held
- * This is necessary to read registers that might be
- * unreliable during a power gating transistion.
+ * This is necessary to read registers that might be
+ * unreliable during a power gating transition.
*
* The lower bits are the BYTE offset of the register to read. This
* allows reading multiple registers in a single call and having
@@ -76,7 +76,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
ssize_t result = 0;
int r;
bool pm_pg_lock, use_bank, use_ring;
- unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;
+ unsigned int instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;
pm_pg_lock = use_bank = use_ring = false;
instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;
@@ -136,7 +136,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
}
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se_bank,
- sh_bank, instance_bank);
+ sh_bank, instance_bank, 0);
} else if (use_ring) {
mutex_lock(&adev->srbm_mutex);
amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid);
@@ -169,7 +169,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
end:
if (use_bank) {
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
} else if (use_ring) {
amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
@@ -208,7 +208,7 @@ static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file)
{
struct amdgpu_debugfs_regs2_data *rd;
- rd = kzalloc(sizeof *rd, GFP_KERNEL);
+ rd = kzalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
return -ENOMEM;
rd->adev = file_inode(file)->i_private;
@@ -221,6 +221,7 @@ static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file)
static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file)
{
struct amdgpu_debugfs_regs2_data *rd = file->private_data;
+
mutex_destroy(&rd->lock);
kfree(file->private_data);
return 0;
@@ -263,7 +264,7 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se,
rd->id.grbm.sh,
- rd->id.grbm.instance);
+ rd->id.grbm.instance, 0);
}
if (rd->id.use_srbm) {
@@ -295,7 +296,7 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off
}
end:
if (rd->id.use_grbm) {
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -324,7 +325,8 @@ static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigne
switch (cmd) {
case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE:
mutex_lock(&rd->lock);
- r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, sizeof rd->id);
+ r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data,
+ sizeof(rd->id));
mutex_unlock(&rd->lock);
return r ? -EINVAL : 0;
default:
@@ -863,7 +865,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
* The offset being sought changes which wave that the status data
* will be returned for. The bits are used as follows:
*
- * Bits 0..6: Byte offset into data
+ * Bits 0..6: Byte offset into data
* Bits 7..14: SE selector
* Bits 15..22: SH/SA selector
* Bits 23..30: CU/{WGP+SIMD} selector
@@ -907,13 +909,13 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0);
x = 0;
if (adev->gfx.funcs->read_wave_data)
adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
- amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
mutex_unlock(&adev->grbm_idx_mutex);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -1001,7 +1003,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0);
if (bank == 0) {
if (adev->gfx.funcs->read_wave_vgprs)
@@ -1011,7 +1013,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
}
- amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
mutex_unlock(&adev->grbm_idx_mutex);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -1429,7 +1431,7 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_gfxoff_residency_fops,
};
-static const char *debugfs_regs_names[] = {
+static const char * const debugfs_regs_names[] = {
"amdgpu_regs",
"amdgpu_regs2",
"amdgpu_regs_didt",
@@ -1447,7 +1449,7 @@ static const char *debugfs_regs_names[] = {
/**
* amdgpu_debugfs_regs_init - Initialize debugfs entries that provide
- * register access.
+ * register access.
*
* @adev: The device to attach the debugfs entries to
*/
@@ -1459,7 +1461,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
ent = debugfs_create_file(debugfs_regs_names[i],
- S_IFREG | S_IRUGO, root,
+ S_IFREG | 0444, root,
adev, debugfs_regs[i]);
if (!i && !IS_ERR_OR_NULL(ent))
i_size_write(ent->d_inode, adev->rmmio_size);
@@ -1494,12 +1496,12 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
kthread_park(ring->sched.thread);
}
- seq_printf(m, "run ib test:\n");
+ seq_puts(m, "run ib test:\n");
r = amdgpu_ib_ring_tests(adev);
if (r)
seq_printf(m, "ib ring tests failed (%d).\n", r);
else
- seq_printf(m, "ib ring tests passed.\n");
+ seq_puts(m, "ib ring tests passed.\n");
/* go on the scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -1978,7 +1980,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_debugfs_ring_init(adev, ring);
}
- for ( i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (!amdgpu_vcnfw_log)
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 981a9cfb63b5..ae0bcffa2591 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -998,7 +998,7 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
if (array_size % 3)
return;
- for (i = 0; i < array_size; i +=3) {
+ for (i = 0; i < array_size; i += 3) {
reg = registers[i + 0];
and_mask = registers[i + 1];
or_mask = registers[i + 2];
@@ -1547,7 +1547,7 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
amdgpu_sched_jobs);
amdgpu_sched_jobs = 4;
- } else if (!is_power_of_2(amdgpu_sched_jobs)){
+ } else if (!is_power_of_2(amdgpu_sched_jobs)) {
dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
amdgpu_sched_jobs);
amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
@@ -2759,8 +2759,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
/* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
- if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1)||
- adev->asic_type == CHIP_ALDEBARAN ))
+ if (amdgpu_passthrough(adev) &&
+ ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
+ adev->asic_type == CHIP_ALDEBARAN))
amdgpu_dpm_handle_passthrough_sbr(adev, true);
if (adev->gmc.xgmi.num_physical_nodes > 1) {
@@ -3089,7 +3090,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
}
adev->ip_blocks[i].status.hw = false;
/* handle putting the SMC in the appropriate state */
- if(!amdgpu_sriov_vf(adev)){
+ if (!amdgpu_sriov_vf(adev)) {
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
if (r) {
@@ -3757,6 +3758,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+ /* APUs with gfx9 onwards don't rely on PCIe atomics; an internal
+ * path natively supports atomics, so set have_atomics_support to true.
+ */
+ else if ((adev->flags & AMD_IS_APU) &&
+ (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)))
+ adev->have_atomics_support = true;
else
adev->have_atomics_support =
!pci_enable_atomic_ops_to_root(adev->pdev,
@@ -4043,7 +4050,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
- if (adev->mode_info.mode_config_initialized){
+ if (adev->mode_info.mode_config_initialized) {
if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
drm_helper_force_disable_all(adev_to_drm(adev));
else
@@ -4704,42 +4711,42 @@ disabled:
int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
{
- u32 i;
- int ret = 0;
+ u32 i;
+ int ret = 0;
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
- dev_info(adev->dev, "GPU mode1 reset\n");
+ dev_info(adev->dev, "GPU mode1 reset\n");
- /* disable BM */
- pci_clear_master(adev->pdev);
+ /* disable BM */
+ pci_clear_master(adev->pdev);
- amdgpu_device_cache_pci_state(adev->pdev);
+ amdgpu_device_cache_pci_state(adev->pdev);
- if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
- dev_info(adev->dev, "GPU smu mode1 reset\n");
- ret = amdgpu_dpm_mode1_reset(adev);
- } else {
- dev_info(adev->dev, "GPU psp mode1 reset\n");
- ret = psp_gpu_reset(adev);
- }
+ if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
+ dev_info(adev->dev, "GPU smu mode1 reset\n");
+ ret = amdgpu_dpm_mode1_reset(adev);
+ } else {
+ dev_info(adev->dev, "GPU psp mode1 reset\n");
+ ret = psp_gpu_reset(adev);
+ }
- if (ret)
- dev_err(adev->dev, "GPU mode1 reset failed\n");
+ if (ret)
+ dev_err(adev->dev, "GPU mode1 reset failed\n");
- amdgpu_device_load_pci_state(adev->pdev);
+ amdgpu_device_load_pci_state(adev->pdev);
- /* wait for asic to come out of reset */
- for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = adev->nbio.funcs->get_memsize(adev);
+ /* wait for asic to come out of reset */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
- if (memsize != 0xffffffff)
- break;
- udelay(1);
- }
+ if (memsize != 0xffffffff)
+ break;
+ udelay(1);
+ }
- amdgpu_atombios_scratch_regs_engine_hung(adev, false);
- return ret;
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+ return ret;
}
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 0ecce0b92b82..b58d94dc1924 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -311,7 +311,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin) || amdgpu_discovery == 2) {
/* ignore the discovery binary from vram if discovery=2 in kernel module parameter */
if (amdgpu_discovery == 2)
- dev_info(adev->dev,"force read ip discovery binary from file");
+ dev_info(adev->dev, "force read ip discovery binary from file");
else
dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n");
@@ -323,7 +323,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
/* check the ip discovery binary signature */
- if(!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
+ if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
dev_warn(adev->dev, "get invalid ip discovery binary signature from file\n");
r = -EINVAL;
goto out;
@@ -529,8 +529,8 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
break;
default:
break;
- }
- }
+ }
+ }
next_ip:
ip_offset += struct_size(ip, base_address, ip->num_base_address);
}
@@ -1208,54 +1208,6 @@ next_ip:
return 0;
}
-int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
- int *major, int *minor, int *revision)
-{
- struct binary_header *bhdr;
- struct ip_discovery_header *ihdr;
- struct die_header *dhdr;
- struct ip *ip;
- uint16_t die_offset;
- uint16_t ip_offset;
- uint16_t num_dies;
- uint16_t num_ips;
- int i, j;
-
- if (!adev->mman.discovery_bin) {
- DRM_ERROR("ip discovery uninitialized\n");
- return -EINVAL;
- }
-
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
- num_dies = le16_to_cpu(ihdr->num_dies);
-
- for (i = 0; i < num_dies; i++) {
- die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
- num_ips = le16_to_cpu(dhdr->num_ips);
- ip_offset = die_offset + sizeof(*dhdr);
-
- for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
-
- if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) {
- if (major)
- *major = ip->major;
- if (minor)
- *minor = ip->minor;
- if (revision)
- *revision = ip->revision;
- return 0;
- }
- ip_offset += struct_size(ip, base_address, ip->num_base_address);
- }
- }
-
- return -EINVAL;
-}
-
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
int vcn_harvest_count = 0;
@@ -1939,7 +1891,6 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 0, 2):
- case IP_VERSION(3, 0, 192):
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 8563dd4a7dc2..63ec6924b907 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -28,8 +28,6 @@
#define DISCOVERY_TMR_OFFSET (64 << 10)
void amdgpu_discovery_fini(struct amdgpu_device *adev);
-int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
- int *major, int *minor, int *revision);
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
#endif /* __AMDGPU_DISCOVERY__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index d60fe7eb5579..b702f499f5fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -98,7 +98,7 @@ static void amdgpu_display_flip_callback(struct dma_fence *f,
static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
struct dma_fence **f)
{
- struct dma_fence *fence= *f;
+ struct dma_fence *fence = *f;
if (fence == NULL)
return false;
@@ -1252,21 +1252,21 @@ const struct drm_mode_config_funcs amdgpu_mode_funcs = {
.fb_create = amdgpu_display_user_framebuffer_create,
};
-static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
-{ { UNDERSCAN_OFF, "off" },
+static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = {
+ { UNDERSCAN_OFF, "off" },
{ UNDERSCAN_ON, "on" },
{ UNDERSCAN_AUTO, "auto" },
};
-static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
-{ { AMDGPU_AUDIO_DISABLE, "off" },
+static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = {
+ { AMDGPU_AUDIO_DISABLE, "off" },
{ AMDGPU_AUDIO_ENABLE, "on" },
{ AMDGPU_AUDIO_AUTO, "auto" },
};
/* XXX support different dither options? spatial, temporal, both, etc. */
-static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
-{ { AMDGPU_FMT_DITHER_DISABLE, "off" },
+static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
+ { AMDGPU_FMT_DITHER_DISABLE, "off" },
{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
@@ -1496,8 +1496,7 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
ret |= DRM_SCANOUTPOS_ACCURATE;
vbl_start = vbl & 0x1fff;
vbl_end = (vbl >> 16) & 0x1fff;
- }
- else {
+ } else {
/* No: Fake something reasonable which gives at least ok results. */
vbl_start = mode->crtc_vdisplay;
vbl_end = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 0c001bb8fc2b..6dceaf40625b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -149,7 +149,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
if (!bo->tbo.pin_count) {
/* move buffer into GTT or VRAM */
struct ttm_operation_ctx ctx = { false, false };
- unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
+ unsigned int domains = AMDGPU_GEM_DOMAIN_GTT;
if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
attach->peer2peer) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
index 8fd11497faba..ffb75d23d2fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
@@ -83,6 +83,8 @@ struct amdgpu_doorbell_index {
};
uint32_t first_non_cp;
uint32_t last_non_cp;
+ uint32_t xcc1_kiq_start;
+ uint32_t xcc1_mec_ring0_start;
uint32_t max_assignment;
/* Per engine SDMA doorbell size in dword */
uint32_t sdma_doorbell_range;
@@ -164,7 +166,12 @@ typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT
AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0,
AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7,
- AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x18F,
+ /* kiq/kcq doorbells for the second and later XCDs; max 8 XCDs */
+ AMDGPU_VEGA20_DOORBELL_XCC1_KIQ_START = 0x190,
+ /* 8 compute rings per GC; extends up to 0x1CE */
+ AMDGPU_VEGA20_DOORBELL_XCC1_MEC_RING0_START = 0x197,
+
+ AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x1CE,
AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF
} AMDGPU_VEGA20_DOORBELL_ASSIGNMENT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b1ca1ab6d6ad..45544ebe576e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -110,9 +110,11 @@
* 3.52.0 - Add AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD, add device_info fields:
* tcp_cache_size, num_sqc_per_wgp, sqc_data_cache_size, sqc_inst_cache_size,
* gl1c_cache_size, gl2c_cache_size, mall_size, enabled_rb_pipes_mask_hi
+ * 3.53.0 - Support for GFX11 CP GFX shadowing
+ * 3.54.0 - Add AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS support
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 52
+#define KMS_DRIVER_MINOR 54
#define KMS_DRIVER_PATCHLEVEL 0
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -1660,7 +1662,7 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
};
static const struct pci_device_id pciidlist[] = {
-#ifdef CONFIG_DRM_AMDGPU_SI
+#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index f52d0ba91a77..1994eafd3e71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -42,7 +42,6 @@
#include "amdgpu_reset.h"
/*
- * Fences
* Fences mark an event in the GPUs pipeline and are used
* for GPU/CPU synchronization. When the fence is written,
* it is expected that all buffers associated with that fence
@@ -140,7 +139,7 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* Returns 0 on success, -ENOMEM on failure.
*/
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
- unsigned flags)
+ unsigned int flags)
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
@@ -174,11 +173,11 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
adev->fence_context + ring->idx, seq);
/* Against remove in amdgpu_job_{free, free_cb} */
dma_fence_get(fence);
- }
- else
+ } else {
dma_fence_init(fence, &amdgpu_fence_ops,
&ring->fence_drv.lock,
adev->fence_context + ring->idx, seq);
+ }
}
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
@@ -396,7 +395,7 @@ signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
* Returns the number of emitted fences on the ring. Used by the
* dynpm code to ring track activity.
*/
-unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
+unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
uint64_t emitted;
@@ -475,7 +474,7 @@ void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
*/
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
- unsigned irq_type)
+ unsigned int irq_type)
{
struct amdgpu_device *adev = ring->adev;
uint64_t index;
@@ -653,6 +652,7 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
+
if (!ring || !ring->fence_drv.initialized)
continue;
@@ -840,6 +840,7 @@ static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
+
if (!ring || !ring->fence_drv.initialized)
continue;
@@ -913,6 +914,7 @@ static void amdgpu_debugfs_reset_work(struct work_struct *work)
reset_work);
struct amdgpu_reset_context reset_context;
+
memset(&reset_context, 0, sizeof(reset_context));
reset_context.method = AMD_RESET_METHOD_NONE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 9d3a0542c996..76dd2841cc0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -63,10 +63,10 @@ void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
}
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
- int mec, int pipe, int queue)
+ int xcc_id, int mec, int pipe, int queue)
{
return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
- adev->gfx.mec.queue_bitmap);
+ adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
}
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
@@ -204,29 +204,38 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
- int i, queue, pipe;
+ int i, j, queue, pipe;
bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe,
adev->gfx.num_compute_rings);
+ int num_xcd = (adev->gfx.num_xcd > 1) ? adev->gfx.num_xcd : 1;
if (multipipe_policy) {
- /* policy: make queues evenly cross all pipes on MEC1 only */
- for (i = 0; i < max_queues_per_mec; i++) {
- pipe = i % adev->gfx.mec.num_pipe_per_mec;
- queue = (i / adev->gfx.mec.num_pipe_per_mec) %
- adev->gfx.mec.num_queue_per_pipe;
-
- set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
- adev->gfx.mec.queue_bitmap);
+ /* policy: make queues evenly cross all pipes on MEC1 only;
+ * for multiple XCCs, just reuse the original policy for simplicity */
+ for (j = 0; j < num_xcd; j++) {
+ for (i = 0; i < max_queues_per_mec; i++) {
+ pipe = i % adev->gfx.mec.num_pipe_per_mec;
+ queue = (i / adev->gfx.mec.num_pipe_per_mec) %
+ adev->gfx.mec.num_queue_per_pipe;
+
+ set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
+ adev->gfx.mec_bitmap[j].queue_bitmap);
+ }
}
} else {
/* policy: amdgpu owns all queues in the given pipe */
- for (i = 0; i < max_queues_per_mec; ++i)
- set_bit(i, adev->gfx.mec.queue_bitmap);
+ for (j = 0; j < num_xcd; j++) {
+ for (i = 0; i < max_queues_per_mec; ++i)
+ set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
+ }
}
- dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
+ for (j = 0; j < num_xcd; j++) {
+ dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
+ bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
+ }
}
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
@@ -258,7 +267,7 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
}
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
+ struct amdgpu_ring *ring, int xcc_id)
{
int queue_bit;
int mec, pipe, queue;
@@ -268,7 +277,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
* adev->gfx.mec.num_queue_per_pipe;
while (--queue_bit >= 0) {
- if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
+ if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
continue;
amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
@@ -294,9 +303,9 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq)
+ struct amdgpu_irq_src *irq, int xcc_id)
{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
int r = 0;
spin_lock_init(&kiq->ring_lock);
@@ -305,15 +314,21 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
ring->ring_obj = NULL;
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.kiq;
+ ring->xcc_id = xcc_id;
ring->vm_hub = AMDGPU_GFXHUB_0;
+ if (xcc_id >= 1)
+ ring->doorbell_index = adev->doorbell_index.xcc1_kiq_start +
+ xcc_id - 1;
+ else
+ ring->doorbell_index = adev->doorbell_index.kiq;
- r = amdgpu_gfx_kiq_acquire(adev, ring);
+ r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
if (r)
return r;
ring->eop_gpu_addr = kiq->eop_gpu_addr;
ring->no_scheduler = true;
- sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
+ sprintf(ring->name, "kiq_%d.%d.%d.%d", xcc_id, ring->me, ring->pipe, ring->queue);
r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
@@ -327,19 +342,19 @@ void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
amdgpu_ring_fini(ring);
}
-void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
+void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
- unsigned hpd_size)
+ unsigned hpd_size, int xcc_id)
{
int r;
u32 *hpd;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
@@ -362,13 +377,18 @@ int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
- unsigned mqd_size)
+ unsigned mqd_size, int xcc_id)
{
- struct amdgpu_ring *ring = NULL;
- int r, i;
+ int r, i, j;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *ring = &kiq->ring;
+ u32 domain = AMDGPU_GEM_DOMAIN_GTT;
+
+ /* Only allow MQDs in VRAM on gfx10 and 11 for now to avoid changing behavior on older chips */
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
+ domain |= AMDGPU_GEM_DOMAIN_VRAM;
/* create MQD for KIQ */
- ring = &adev->gfx.kiq.ring;
if (!adev->enable_mes_kiq && !ring->mqd_obj) {
/* originaly the KIQ MQD is put in GTT domain, but for SRIOV VRAM domain is a must
* otherwise hypervisor trigger SAVE_VF fail after driver unloaded which mean MQD
@@ -387,8 +407,8 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
}
/* prepare MQD backup */
- adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
- if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
+ kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL);
+ if (!kiq->mqd_backup)
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
}
@@ -398,13 +418,14 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
ring = &adev->gfx.gfx_ring[i];
if (!ring->mqd_obj) {
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+ domain, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
return r;
}
+ ring->mqd_size = mqd_size;
/* prepare MQD backup */
adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
if (!adev->gfx.me.mqd_backup[i])
@@ -415,19 +436,21 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
/* create MQD for each KCQ */
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ ring = &adev->gfx.compute_ring[j];
if (!ring->mqd_obj) {
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+ domain, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
return r;
}
+ ring->mqd_size = mqd_size;
/* prepare MQD backup */
- adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
- if (!adev->gfx.mec.mqd_backup[i])
+ adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
+ if (!adev->gfx.mec.mqd_backup[j])
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
}
}
@@ -435,10 +458,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
return 0;
}
-void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
+void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
{
struct amdgpu_ring *ring = NULL;
- int i;
+ int i, j;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
@@ -451,43 +475,84 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
}
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring[i];
- kfree(adev->gfx.mec.mqd_backup[i]);
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ ring = &adev->gfx.compute_ring[j];
+ kfree(adev->gfx.mec.mqd_backup[j]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
&ring->mqd_ptr);
}
- ring = &adev->gfx.kiq.ring;
+ ring = &kiq->ring;
+ kfree(kiq->mqd_backup);
kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
&ring->mqd_ptr);
}
-int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
+int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
struct amdgpu_ring *kiq_ring = &kiq->ring;
int i, r = 0;
+ int j;
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
- spin_lock(&adev->gfx.kiq.ring_lock);
- if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
- adev->gfx.num_compute_rings)) {
- spin_unlock(&adev->gfx.kiq.ring_lock);
- return -ENOMEM;
+ spin_lock(&kiq->ring_lock);
+ if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
+ adev->gfx.num_compute_rings)) {
+ spin_unlock(&kiq->ring_lock);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ kiq->pmf->kiq_unmap_queues(kiq_ring,
+ &adev->gfx.compute_ring[i],
+ RESET_QUEUES, 0, 0);
+ }
}
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
- RESET_QUEUES, 0, 0);
+ if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
+ r = amdgpu_ring_test_helper(kiq_ring);
+ spin_unlock(&kiq->ring_lock);
- if (adev->gfx.kiq.ring.sched.ready && !adev->job_hang)
+ return r;
+}
+
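+/* preempt and unmap the gfx rings through the KIQ of the given XCC */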
+int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ int i, r = 0;
+ int j;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock(&kiq->ring_lock);
+ if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
+ adev->gfx.num_gfx_rings)) {
+ spin_unlock(&kiq->ring_lock);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_gfx_rings;
+ kiq->pmf->kiq_unmap_queues(kiq_ring,
+ &adev->gfx.gfx_ring[i],
+ PREEMPT_QUEUES, 0, 0);
+ }
+ }
+
+ if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
r = amdgpu_ring_test_helper(kiq_ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&kiq->ring_lock);
return r;
}
@@ -505,18 +570,18 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
return set_resource_bit;
}
-int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
+int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
uint64_t queue_mask = 0;
- int r, i;
+ int r, i, j;
if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
return -EINVAL;
for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
- if (!test_bit(i, adev->gfx.mec.queue_bitmap))
+ if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
continue;
/* This situation may be hit in the future if a new HW
@@ -532,25 +597,66 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
kiq_ring->queue);
- spin_lock(&adev->gfx.kiq.ring_lock);
- r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
- adev->gfx.num_compute_rings +
- kiq->pmf->set_resources_size);
- if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
- spin_unlock(&adev->gfx.kiq.ring_lock);
- return r;
+ spin_lock(&kiq->ring_lock);
+ /* No need to map kcq on the slave */
+ if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+ r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
+ adev->gfx.num_compute_rings +
+ kiq->pmf->set_resources_size);
+ if (r) {
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ spin_unlock(&kiq->ring_lock);
+ return r;
+ }
+
+ if (adev->enable_mes)
+ queue_mask = ~0ULL;
+
+ kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ kiq->pmf->kiq_map_queues(kiq_ring,
+ &adev->gfx.compute_ring[i]);
+ }
}
- if (adev->enable_mes)
- queue_mask = ~0ULL;
+ r = amdgpu_ring_test_helper(kiq_ring);
+ spin_unlock(&kiq->ring_lock);
+ if (r)
+ DRM_ERROR("KCQ enable failed\n");
+
+ return r;
+}
+
+int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ int r, i, j;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
+ return -EINVAL;
+
+ spin_lock(&kiq->ring_lock);
+ /* No need to map kgq on the slave */
+ if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+ r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
+ adev->gfx.num_gfx_rings);
+ if (r) {
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ spin_unlock(&kiq->ring_lock);
+ return r;
+ }
- kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_gfx_rings;
+ kiq->pmf->kiq_map_queues(kiq_ring,
+ &adev->gfx.gfx_ring[i]);
+ }
+ }
r = amdgpu_ring_test_helper(kiq_ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&kiq->ring_lock);
if (r)
DRM_ERROR("KCQ enable failed\n");
@@ -788,7 +894,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
signed long r, cnt = 0;
unsigned long flags;
uint32_t seq, reg_val_offs = 0, value = 0;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *ring = &kiq->ring;
if (amdgpu_device_skip_hw_access(adev))
@@ -856,7 +962,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
signed long r, cnt = 0;
unsigned long flags;
uint32_t seq;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_wreg);
@@ -1060,3 +1166,9 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
}
}
+
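+/* only the first XCC in each partition (the master) maps and unmaps queues */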
+bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
+{
+ return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
+ adev->gfx.num_xcc_per_xcp : 1));
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index bfabea76d166..2755f00ac19a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -75,8 +75,10 @@ struct amdgpu_mec {
u32 num_mec;
u32 num_pipe_per_mec;
u32 num_queue_per_pipe;
- void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1];
+ void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES];
+};
+struct amdgpu_mec_bitmap {
/* These are the resources for which amdgpu takes ownership */
DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
};
@@ -120,6 +122,7 @@ struct amdgpu_kiq {
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
const struct kiq_pm4_funcs *pmf;
+ void *mqd_backup;
};
/*
@@ -230,11 +233,18 @@ struct amdgpu_gfx_ras {
struct amdgpu_iv_entry *entry);
};
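+/* size and alignment requirements for the CP GFX shadow and CSA buffers */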
+struct amdgpu_gfx_shadow_info {
+ u32 shadow_size;
+ u32 shadow_alignment;
+ u32 csa_size;
+ u32 csa_alignment;
+};
+
struct amdgpu_gfx_funcs {
/* get the gpu clock counter */
uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 instance);
+ u32 sh_num, u32 instance, int xcc_id);
void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd,
uint32_t wave, uint32_t *dst, int *no_fields);
void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd,
@@ -247,6 +257,8 @@ struct amdgpu_gfx_funcs {
u32 queue, u32 vmid);
void (*init_spm_golden)(struct amdgpu_device *adev);
void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
+ int (*get_gfx_shadow_info)(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info);
};
struct sq_work {
@@ -296,7 +308,8 @@ struct amdgpu_gfx {
struct amdgpu_ce ce;
struct amdgpu_me me;
struct amdgpu_mec mec;
- struct amdgpu_kiq kiq;
+ struct amdgpu_mec_bitmap mec_bitmap[AMDGPU_MAX_GC_INSTANCES];
+ struct amdgpu_kiq kiq[AMDGPU_MAX_GC_INSTANCES];
struct amdgpu_imu imu;
bool rs64_enable; /* firmware format */
const struct firmware *me_fw; /* ME firmware */
@@ -376,15 +389,18 @@ struct amdgpu_gfx {
struct amdgpu_ring sw_gfx_ring[AMDGPU_MAX_SW_GFX_RINGS];
struct amdgpu_ring_mux muxer;
+ bool cp_gfx_shadow; /* for gfx11 */
+
enum amdgpu_gfx_partition partition_mode;
uint32_t num_xcd;
uint32_t num_xcc_per_xcp;
};
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
-#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
+#define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id)))
#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid))
#define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev))
+#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si)))
/**
* amdgpu_gfx_create_bitmask - create a bitmask
@@ -404,19 +420,21 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
- struct amdgpu_irq_src *irq);
+ struct amdgpu_irq_src *irq, int xcc_id);
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring);
-void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev);
+void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
- unsigned hpd_size);
+ unsigned hpd_size, int xcc_id);
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
- unsigned mqd_size);
-void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev);
-int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev);
-int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev);
+ unsigned mqd_size, int xcc_id);
+void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id);
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev);
@@ -425,8 +443,8 @@ int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
int pipe, int queue);
void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
int *mec, int *pipe, int *queue);
-bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
- int pipe, int queue);
+bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int xcc_id,
+ int mec, int pipe, int queue);
bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
@@ -458,4 +476,6 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id)
int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
+
+bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 4ff348e10e4d..c955c3f060cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -136,7 +136,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
uint64_t fence_ctx;
uint32_t status = 0, alloc_size;
unsigned fence_flags = 0;
- bool secure;
+ bool secure, init_shadow;
+ u64 shadow_va, csa_va, gds_va;
+ int vmid = AMDGPU_JOB_GET_VMID(job);
unsigned i;
int r = 0;
@@ -150,9 +152,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
vm = job->vm;
fence_ctx = job->base.s_fence ?
job->base.s_fence->scheduled.context : 0;
+ shadow_va = job->shadow_va;
+ csa_va = job->csa_va;
+ gds_va = job->gds_va;
+ init_shadow = job->init_shadow;
} else {
vm = NULL;
fence_ctx = 0;
+ shadow_va = 0;
+ csa_va = 0;
+ gds_va = 0;
+ init_shadow = false;
}
if (!ring->sched.ready && !ring->is_mes_queue) {
@@ -212,7 +222,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
amdgpu_ring_ib_begin(ring);
- if (job && ring->funcs->init_cond_exec)
+
+ if (ring->funcs->emit_gfx_shadow)
+ amdgpu_ring_emit_gfx_shadow(ring, shadow_va, csa_va, gds_va,
+ init_shadow, vmid);
+
+ if (ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring);
amdgpu_device_flush_hdp(adev, ring);
@@ -263,6 +278,18 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
fence_flags | AMDGPU_FENCE_FLAG_64BIT);
}
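+ /* unset the shadow buffers once the IBs have been processed */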
+ if (ring->funcs->emit_gfx_shadow) {
+ amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0);
+
+ if (ring->funcs->init_cond_exec) {
+ unsigned ce_offset = ~0;
+
+ ce_offset = amdgpu_ring_init_cond_exec(ring);
+ if (ce_offset != ~0 && ring->funcs->patch_cond_exec)
+ amdgpu_ring_patch_cond_exec(ring, ce_offset);
+ }
+ }
+
r = amdgpu_fence_emit(ring, f, job, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
@@ -406,6 +433,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
else
tmo = tmo_gfx;
+ /* skip the ib test on slave kcqs */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
+ !amdgpu_gfx_is_master_xcc(adev, ring->xcc_id))
+ continue;
+
r = amdgpu_ring_test_ib(ring, tmo);
if (!r) {
DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 1d5af50331e4..fceb3b384955 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -270,7 +270,7 @@ void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
entry->timestamp_src = dw[2] >> 31;
entry->pasid = dw[3] & 0xffff;
- entry->pasid_src = dw[3] >> 31;
+ entry->node_id = (dw[3] >> 16) & 0xff;
entry->src_data[0] = dw[4];
entry->src_data[1] = dw[5];
entry->src_data[2] = dw[6];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index be243adf3e65..1c747ac4129a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -53,7 +53,7 @@ struct amdgpu_iv_entry {
uint64_t timestamp;
unsigned timestamp_src;
unsigned pasid;
- unsigned pasid_src;
+ unsigned node_id;
unsigned src_data[AMDGPU_IRQ_SRC_DATA_MAX_SIZE_DW];
const uint32_t *iv_entry;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 52f2e313ea17..3f9804f956c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -67,6 +67,12 @@ struct amdgpu_job {
uint64_t uf_addr;
uint64_t uf_sequence;
+ /* virtual addresses for shadow/GDS/CSA */
+ uint64_t shadow_va;
+ uint64_t csa_va;
+ uint64_t gds_va;
+ bool init_shadow;
+
/* job_run_counter >= 1 means a resubmit job */
uint32_t job_run_counter;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 0efb38539d70..1d3b224b8b28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -876,6 +876,19 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
dev_info->gl2c_cache_size = adev->gfx.config.gc_gl2c_per_gpu;
dev_info->mall_size = adev->gmc.mall_size;
+
+ if (adev->gfx.funcs->get_gfx_shadow_info) {
+ struct amdgpu_gfx_shadow_info shadow_info;
+
+ ret = amdgpu_gfx_get_gfx_shadow_info(adev, &shadow_info);
+ if (!ret) {
+ dev_info->shadow_size = shadow_info.shadow_size;
+ dev_info->shadow_alignment = shadow_info.shadow_alignment;
+ dev_info->csa_size = shadow_info.csa_size;
+ dev_info->csa_alignment = shadow_info.csa_alignment;
+ }
+ }
+
ret = copy_to_user(out, dev_info,
min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
kfree(dev_info);
@@ -1140,6 +1153,15 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
kfree(caps);
return r;
}
+ case AMDGPU_INFO_MAX_IBS: {
+ uint32_t max_ibs[AMDGPU_HW_IP_NUM];
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
+ max_ibs[i] = amdgpu_ring_max_ibs(i);
+
+ return copy_to_user(out, max_ibs,
+ min((size_t)size, sizeof(max_ibs))) ? -EFAULT : 0;
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -1449,7 +1471,7 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
int ret, i;
static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
-#define TA_FW_NAME(type) [TA_FW_TYPE_PSP_##type] = #type
+#define TA_FW_NAME(type)[TA_FW_TYPE_PSP_##type] = #type
TA_FW_NAME(XGMI),
TA_FW_NAME(RAS),
TA_FW_NAME(HDCP),
@@ -1548,7 +1570,7 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
fw_info.feature, fw_info.ver);
/* RLCV */
- query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV;
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
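The new AMDGPU_INFO_MAX_IBS query fills one uint32_t per hardware IP type. A hypothetical userspace probe, assuming a uapi header that defines the new query id (the uapi side is not shown in this hunk); error handling elided:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <amdgpu_drm.h>		/* assumed to define AMDGPU_INFO_MAX_IBS */

static int query_max_ibs(int fd, uint32_t max_ibs[AMDGPU_HW_IP_NUM])
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)max_ibs;
	request.return_size = AMDGPU_HW_IP_NUM * sizeof(uint32_t);
	request.query = AMDGPU_INFO_MAX_IBS;

	return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
}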
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 9d7e6e0e73ed..aa37b703c718 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -411,7 +411,7 @@ static int psp_sw_init(void *handle)
if ((psp_get_runtime_db_entry(adev,
PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
&scpm_entry)) &&
- (SCPM_DISABLE != scpm_entry.scpm_status)) {
+ (scpm_entry.scpm_status != SCPM_DISABLE)) {
adev->scpm_enabled = true;
adev->scpm_status = scpm_entry.scpm_status;
} else {
@@ -458,10 +458,9 @@ static int psp_sw_init(void *handle)
if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) {
- ret= psp_sysfs_init(adev);
- if (ret) {
+ ret = psp_sysfs_init(adev);
+ if (ret)
return ret;
- }
}
ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
@@ -520,6 +519,8 @@ static int psp_sw_fini(void *handle)
kfree(cmd);
cmd = NULL;
+ psp_free_shared_bufs(psp);
+
if (psp->km_ring.ring_mem)
amdgpu_bo_free_kernel(&adev->firmware.rbuf,
&psp->km_ring.ring_mem_mc_addr,
@@ -643,7 +644,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
- memcpy((void*)&cmd->resp, (void*)&psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
+ memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
/* In some cases, the PSP response status is not 0 even when there is no
* problem with the submitted command. Some versions of the PSP FW
@@ -828,7 +829,7 @@ static int psp_tmr_load(struct psp_context *psp)
}
static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
- struct psp_gfx_cmd_resp *cmd)
+ struct psp_gfx_cmd_resp *cmd)
{
if (amdgpu_sriov_vf(psp->adev))
cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
@@ -1065,7 +1066,7 @@ static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
struct ta_context *context)
{
cmd->cmd_id = context->ta_load_type;
- cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
@@ -1136,9 +1137,8 @@ int psp_ta_load(struct psp_context *psp, struct ta_context *context)
context->resp_status = cmd->resp.status;
- if (!ret) {
+ if (!ret)
context->session_id = cmd->resp.session_id;
- }
release_psp_cmd_buf(psp);
@@ -1465,8 +1465,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_ras_intr_triggered())
return ret;
- if (ras_cmd->if_version > RAS_TA_HOST_IF_VER)
- {
+ if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
DRM_WARN("RAS: Unsupported Interface");
return -EINVAL;
}
@@ -1476,8 +1475,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
dev_warn(psp->adev->dev, "ECC switch disabled\n");
ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
- }
- else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
+ } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
dev_warn(psp->adev->dev,
"RAS internal register access blocked\n");
@@ -1573,11 +1571,10 @@ int psp_ras_initialize(struct psp_context *psp)
if (ret)
dev_warn(adev->dev, "PSP set boot config failed\n");
else
- dev_warn(adev->dev, "GECC will be disabled in next boot cycle "
- "if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
+ dev_warn(adev->dev, "GECC will be disabled in the next boot cycle if amdgpu_ras_enable and/or amdgpu_ras_mask is set to 0x0\n");
}
} else {
- if (1 == boot_cfg) {
+ if (boot_cfg == 1) {
dev_info(adev->dev, "GECC is enabled\n");
} else {
/* enable GECC in next boot cycle if it is disabled
@@ -2363,7 +2360,7 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
}
static int psp_execute_non_psp_fw_load(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode)
+ struct amdgpu_firmware_info *ucode)
{
int ret = 0;
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
@@ -2402,9 +2399,8 @@ static int psp_load_smu_fw(struct psp_context *psp)
(adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) ||
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 2)))) {
ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
- if (ret) {
+ if (ret)
DRM_WARN("Failed to set MP1 state prepare for reload\n");
- }
}
ret = psp_execute_non_psp_fw_load(psp, ucode);
@@ -2655,8 +2651,6 @@ static int psp_hw_fini(void *handle)
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
- psp_free_shared_bufs(psp);
-
return 0;
}
@@ -2716,9 +2710,8 @@ static int psp_suspend(void *handle)
}
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
- if (ret) {
+ if (ret)
DRM_ERROR("PSP ring stop failed\n");
- }
out:
return ret;
@@ -2967,7 +2960,7 @@ static int parse_sos_bin_descriptor(struct psp_context *psp,
psp->sos.fw_version = le32_to_cpu(desc->fw_version);
psp->sos.feature_version = le32_to_cpu(desc->fw_version);
psp->sos.size_bytes = le32_to_cpu(desc->size_bytes);
- psp->sos.start_addr = ucode_start_addr;
+ psp->sos.start_addr = ucode_start_addr;
break;
case PSP_FW_TYPE_PSP_SYS_DRV:
psp->sys.fw_version = le32_to_cpu(desc->fw_version);
@@ -3491,7 +3484,7 @@ void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size
drm_dev_exit(idx);
}
-static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(usbc_pd_fw, 0644,
psp_usbc_pd_fw_sysfs_read,
psp_usbc_pd_fw_sysfs_write);
@@ -3618,6 +3611,7 @@ int amdgpu_psp_sysfs_init(struct amdgpu_device *adev)
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 10):
if (!psp->adev) {
psp->adev = adev;
psp_v13_0_set_psp_funcs(psp);
@@ -3673,8 +3667,7 @@ static void psp_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
}
-const struct amdgpu_ip_block_version psp_v3_1_ip_block =
-{
+const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
.major = 3,
.minor = 1,
@@ -3682,8 +3675,7 @@ const struct amdgpu_ip_block_version psp_v3_1_ip_block =
.funcs = &psp_ip_funcs,
};
-const struct amdgpu_ip_block_version psp_v10_0_ip_block =
-{
+const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
.major = 10,
.minor = 0,
@@ -3691,8 +3683,7 @@ const struct amdgpu_ip_block_version psp_v10_0_ip_block =
.funcs = &psp_ip_funcs,
};
-const struct amdgpu_ip_block_version psp_v11_0_ip_block =
-{
+const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
.major = 11,
.minor = 0,
@@ -3708,8 +3699,7 @@ const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
.funcs = &psp_ip_funcs,
};
-const struct amdgpu_ip_block_version psp_v12_0_ip_block =
-{
+const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
.major = 12,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 3ab8a88789c8..22f401fd1901 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1597,8 +1597,7 @@ static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
{
/* Fatal error events are handled on host side */
- if (amdgpu_sriov_vf(adev) ||
- !amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF))
+ if (amdgpu_sriov_vf(adev))
return;
if (adev->nbio.ras &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index dc474b809604..1e9ae3bc59d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -50,6 +50,26 @@
*/
/**
+ * amdgpu_ring_max_ibs - Return max IBs that fit in a single submission.
+ *
+ * @type: ring type for which to return the limit.
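+ *
+ * Returns: the maximum IB count allowed for the given ring type.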
+ */
+unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type)
+{
+ switch (type) {
+ case AMDGPU_RING_TYPE_GFX:
+ /* Need to keep at least 192 on GFX7+ for old radv. */
+ return 192;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ return 125;
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ return 16;
+ default:
+ return 49;
+ }
+}
+
+/**
* amdgpu_ring_alloc - allocate space on the ring buffer
*
* @ring: amdgpu_ring structure holding ring information
@@ -182,6 +202,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
int sched_hw_submission = amdgpu_sched_hw_submission;
u32 *num_sched;
u32 hw_ip;
+ unsigned int max_ibs_dw;
/* Set the hw submission limit higher for KIQ because
* it's used for a number of gfx/compute tasks by both
@@ -290,6 +311,14 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
+ max_ibs_dw = ring->funcs->emit_frame_size +
+ amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
+ max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
+
+ if (WARN_ON(max_ibs_dw > max_dw))
+ max_dw = max_ibs_dw;
+
ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
ring->buf_mask = (ring->ring_size / 4) - 1;
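Worked example of the new floor (the dword counts are illustrative; the real ones come from the ring funcs): with, say, 242 dwords of frame overhead, 4 dwords per IB packet and the 192-IB GFX limit, one submission can need 242 + 192 * 4 = 1010 dwords, rounded up by the ring's align mask:

#include <stdio.h>

int main(void)
{
	unsigned int emit_frame_size = 242;	/* example frame overhead, dwords */
	unsigned int emit_ib_size = 4;		/* example per-IB packet, dwords */
	unsigned int max_ibs = 192;		/* amdgpu_ring_max_ibs(GFX) */
	unsigned int align_mask = 0x3f;		/* example ring alignment */
	unsigned int max_ibs_dw;

	max_ibs_dw = emit_frame_size + max_ibs * emit_ib_size;
	max_ibs_dw = (max_ibs_dw + align_mask) & ~align_mask;
	printf("worst case: %u dwords\n", max_ibs_dw);	/* 1024 here */
	return 0;
}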
@@ -361,6 +390,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
amdgpu_bo_free_kernel(&ring->ring_obj,
&ring->gpu_addr,
(void **)&ring->ring);
+ } else {
+ kfree(ring->fence_drv.fences);
}
dma_fence_put(ring->vmid_wait);
@@ -478,6 +509,59 @@ static const struct file_operations amdgpu_debugfs_ring_fops = {
.llseek = default_llseek
};
+static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_ring *ring = file_inode(f)->i_private;
+ volatile u32 *mqd;
+ int r;
+ uint32_t value, result;
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ result = 0;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0))
+ return r;
+
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
+ if (r) {
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ return r;
+ }
+
+ while (size) {
+ if (*pos >= ring->mqd_size)
+ goto done;
+
+ value = mqd[*pos/4];
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ goto done;
+ buf += 4;
+ result += 4;
+ size -= 4;
+ *pos += 4;
+ }
+
+done:
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ mqd = NULL;
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r)
+ return r;
+
+ return result;
+}
+
+static const struct file_operations amdgpu_debugfs_mqd_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_mqd_read,
+ .llseek = default_llseek
+};
+
#endif
void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
@@ -493,6 +577,12 @@ void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
&amdgpu_debugfs_ring_fops,
ring->ring_size + 12);
+ if (ring->mqd_obj) {
+ sprintf(name, "amdgpu_mqd_%s", ring->name);
+ debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, ring,
+ &amdgpu_debugfs_mqd_fops,
+ ring->mqd_size);
+ }
#endif
}
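The new amdgpu_mqd_<ring> debugfs node streams the ring's MQD as dwords and rejects reads whose offset or size is not 4-byte aligned. A hypothetical dumper (the DRI minor and ring name vary per card and queue):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path; substitute your DRI minor and ring name */
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_mqd_gfx_0.0.0", O_RDONLY);
	uint32_t dw;
	long off = 0;

	if (fd < 0)
		return 1;
	while (read(fd, &dw, 4) == 4) {		/* dword-sized, dword-aligned */
		printf("%08lx: %08x\n", off, dw);
		off += 4;
	}
	close(fd);
	return 0;
}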
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index d8749444b689..b0dc0a0c2631 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -212,6 +212,8 @@ struct amdgpu_ring_funcs {
void (*end_use)(struct amdgpu_ring *ring);
void (*emit_switch_buffer) (struct amdgpu_ring *ring);
void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
+ void (*emit_gfx_shadow)(struct amdgpu_ring *ring, u64 shadow_va, u64 csa_va,
+ u64 gds_va, bool init_shadow, int vmid);
void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
uint32_t reg_val_offs);
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
@@ -256,6 +258,7 @@ struct amdgpu_ring {
struct amdgpu_bo *mqd_obj;
uint64_t mqd_gpu_addr;
void *mqd_ptr;
+ unsigned mqd_size;
uint64_t eop_gpu_addr;
u32 doorbell_index;
bool use_doorbell;
@@ -309,6 +312,7 @@ struct amdgpu_ring {
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
+#define amdgpu_ring_emit_gfx_shadow(r, s, c, g, i, v) ((r)->funcs->emit_gfx_shadow((r), (s), (c), (g), (i), (v)))
#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
@@ -319,6 +323,7 @@ struct amdgpu_ring {
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)
+unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_ib_begin(struct amdgpu_ring *ring);
void amdgpu_ring_ib_end(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index 85fb730d9fc8..d3bed9a3e61f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -34,9 +34,9 @@
*
* Put the RLC into safe mode if the RLC is enabled and has not already entered safe mode.
*/
-void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
- if (adev->gfx.rlc.in_safe_mode)
+ if (adev->gfx.rlc.in_safe_mode[xcc_id])
return;
/* if RLC is not enabled, do nothing */
@@ -46,8 +46,8 @@ void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
if (adev->cg_flags &
(AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_3D_CGCG)) {
- adev->gfx.rlc.funcs->set_safe_mode(adev);
- adev->gfx.rlc.in_safe_mode = true;
+ adev->gfx.rlc.funcs->set_safe_mode(adev, xcc_id);
+ adev->gfx.rlc.in_safe_mode[xcc_id] = true;
}
}
@@ -58,9 +58,9 @@ void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
*
* Take the RLC out of safe mode if the RLC is enabled and has entered safe mode.
*/
-void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
- if (!(adev->gfx.rlc.in_safe_mode))
+ if (!(adev->gfx.rlc.in_safe_mode[xcc_id]))
return;
/* if RLC is not enabled, do nothing */
@@ -70,8 +70,8 @@ void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
if (adev->cg_flags &
(AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_3D_CGCG)) {
- adev->gfx.rlc.funcs->unset_safe_mode(adev);
- adev->gfx.rlc.in_safe_mode = false;
+ adev->gfx.rlc.funcs->unset_safe_mode(adev, xcc_id);
+ adev->gfx.rlc.in_safe_mode[xcc_id] = false;
}
}
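Turning the safe-mode flag into an array makes enter/exit idempotent per XCC rather than device-wide, so one partition entering safe mode no longer shadows the state of another. A toy model of the per-instance latch:

#include <stdbool.h>
#include <stdio.h>

static bool in_safe_mode[8];		/* one latch per XCC, as in the struct */

static void enter_safe_mode(int xcc_id)
{
	if (in_safe_mode[xcc_id])
		return;			/* already latched on this instance */
	/* ...write RLC_SAFE_MODE for this XCC only... */
	in_safe_mode[xcc_id] = true;
}

static void exit_safe_mode(int xcc_id)
{
	if (!in_safe_mode[xcc_id])
		return;
	in_safe_mode[xcc_id] = false;
}

int main(void)
{
	enter_safe_mode(0);
	enter_safe_mode(2);		/* independent of XCC 0 */
	exit_safe_mode(0);
	printf("xcc0=%d xcc2=%d\n", in_safe_mode[0], in_safe_mode[2]);
	return 0;
}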
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index 23f060db9255..80b263646966 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -157,8 +157,8 @@ typedef struct _RLC_TABLE_OF_CONTENT {
struct amdgpu_rlc_funcs {
bool (*is_rlc_enabled)(struct amdgpu_device *adev);
- void (*set_safe_mode)(struct amdgpu_device *adev);
- void (*unset_safe_mode)(struct amdgpu_device *adev);
+ void (*set_safe_mode)(struct amdgpu_device *adev, int xcc_id);
+ void (*unset_safe_mode)(struct amdgpu_device *adev, int xcc_id);
int (*init)(struct amdgpu_device *adev);
u32 (*get_csb_size)(struct amdgpu_device *adev);
void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
@@ -201,7 +201,7 @@ struct amdgpu_rlc {
u32 cp_table_size;
/* safe mode for updating CG/PG state */
- bool in_safe_mode;
+ bool in_safe_mode[8];
const struct amdgpu_rlc_funcs *funcs;
/* for firmware data */
@@ -260,8 +260,8 @@ struct amdgpu_rlc {
struct amdgpu_rlcg_reg_access_ctrl reg_access_ctrl;
};
-void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev);
-void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev, int xcc_id);
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws);
int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 2cd081cbf706..f61f07575f63 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1803,26 +1803,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
- /* Compute GTT size, either based on 1/2 the size of RAM size
- * or whatever the user passed on module init */
- if (amdgpu_gtt_size == -1) {
- struct sysinfo si;
-
- si_meminfo(&si);
- /* Certain GL unit tests for large textures can cause problems
- * with the OOM killer since there is no way to link this memory
- * to a process. This was originally mitigated (but not necessarily
- * eliminated) by limiting the GTT size. The problem is this limit
- * is often too low for many modern games so just make the limit 1/2
- * of system memory which aligns with TTM. The OOM accounting needs
- * to be addressed, but we shouldn't prevent common 3D applications
- * from being usable just to potentially mitigate that corner case.
- */
- gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
- (u64)si.totalram * si.mem_unit / 2);
- } else {
+ /* Compute GTT size, either based on TTM limit
+ * or whatever the user passed on module init.
+ */
+ if (amdgpu_gtt_size == -1)
+ gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
+ else
gtt_size = (uint64_t)amdgpu_gtt_size << 20;
- }
/* Initialize GTT memory pool */
r = amdgpu_gtt_mgr_init(adev, gtt_size);
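With the hand-rolled half-of-RAM heuristic gone, the default GTT size now simply follows TTM's page limit (itself half of system memory by default), shifted from pages to bytes. Roughly, for a 16 GiB machine with 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t totalram_pages = 4ULL << 20;	/* 16 GiB of 4 KiB pages */
	unsigned int page_shift = 12;

	/* ttm_tt_pages_limit() defaults to half of system memory */
	uint64_t gtt_size = (totalram_pages / 2) << page_shift;
	printf("default GTT: %llu MiB\n", (unsigned long long)(gtt_size >> 20));
	return 0;
}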
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index f2e2cbaa7fde..1311e42ab8e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -65,8 +65,8 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
adev->cg_flags = 0;
adev->pg_flags = 0;
- /* enable mcbp for sriov asic_type before soc21 */
- amdgpu_mcbp = (adev->asic_type < CHIP_IP_DISCOVERY) ? 1 : 0;
+ /* enable mcbp for sriov */
+ amdgpu_mcbp = 1;
}
@@ -74,7 +74,7 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask)
{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *ring = &kiq->ring;
signed long r, cnt = 0;
unsigned long flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index de6d10390ab2..5641cf05d856 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1141,12 +1141,12 @@ static uint32_t cik_get_register_value(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f5b5ce1051a2..2e0234b43f43 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3490,7 +3490,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 instance);
+ u32 sh_num, u32 instance, int xcc_id);
static u32 gfx_v10_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);
static int gfx_v10_0_rlc_backdoor_autoload_buffer_init(struct amdgpu_device *adev);
@@ -3568,7 +3568,7 @@ static void gfx10_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
struct amdgpu_device *adev = kiq_ring->adev;
uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
- if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) {
+ if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
return;
}
@@ -3636,7 +3636,7 @@ static const struct kiq_pm4_funcs gfx_v10_0_kiq_pm4_funcs = {
static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
- adev->gfx.kiq.pmf = &gfx_v10_0_kiq_pm4_funcs;
+ adev->gfx.kiq[0].pmf = &gfx_v10_0_kiq_pm4_funcs;
}
static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
@@ -4219,7 +4219,7 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
const struct gfx_firmware_header_v1_0 *mec_hdr = NULL;
- bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+ bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
@@ -4550,7 +4550,7 @@ static int gfx_v10_0_sw_init(void *handle)
/* KIQ event */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
GFX_10_1__SRCID__CP_IB2_INTERRUPT_PKT,
- &adev->gfx.kiq.irq);
+ &adev->gfx.kiq[0].irq);
if (r)
return r;
@@ -4614,8 +4614,8 @@ static int gfx_v10_0_sw_init(void *handle)
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k,
- j))
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+ k, j))
continue;
r = gfx_v10_0_compute_ring_init(adev, ring_id,
@@ -4629,19 +4629,19 @@ static int gfx_v10_0_sw_init(void *handle)
}
if (!adev->enable_mes_kiq) {
- r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE);
+ r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0);
if (r) {
DRM_ERROR("Failed to init KIQ BOs!\n");
return r;
}
- kiq = &adev->gfx.kiq;
- r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
+ kiq = &adev->gfx.kiq[0];
+ r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
if (r)
return r;
}
- r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd));
+ r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v10_compute_mqd), 0);
if (r)
return r;
@@ -4690,11 +4690,11 @@ static int gfx_v10_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- amdgpu_gfx_mqd_sw_fini(adev);
+ amdgpu_gfx_mqd_sw_fini(adev, 0);
if (!adev->enable_mes_kiq) {
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
- amdgpu_gfx_kiq_fini(adev);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
+ amdgpu_gfx_kiq_fini(adev, 0);
}
gfx_v10_0_pfp_fini(adev);
@@ -4712,7 +4712,7 @@ static int gfx_v10_0_sw_fini(void *handle)
}
static void gfx_v10_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 instance)
+ u32 sh_num, u32 instance, int xcc_id)
{
u32 data;
@@ -4772,13 +4772,13 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
(adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 6))) &&
((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
continue;
- gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0);
data = gfx_v10_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
@@ -4907,7 +4907,7 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0);
wgp_active_bitmap = gfx_v10_0_get_wgp_active_bitmap_per_sh(adev);
/*
* Set corresponding TCP bits for the inactive WGPs in
@@ -4940,7 +4940,7 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
}
}
- gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -6214,7 +6214,7 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
CP_MEC_CNTL__MEC_ME2_HALT_MASK));
break;
}
- adev->gfx.kiq.ring.sched.ready = false;
+ adev->gfx.kiq[0].ring.sched.ready = false;
}
udelay(50);
}
@@ -6423,55 +6423,6 @@ static int gfx_v10_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
return 0;
}
-#ifdef BRING_UP_DEBUG
-static int gfx_v10_0_gfx_queue_init_register(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- struct v10_gfx_mqd *mqd = ring->mqd_ptr;
-
- /* set mmCP_GFX_HQD_WPTR/_HI to 0 */
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr);
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi);
-
- /* set GFX_MQD_BASE */
- WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr);
- WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
-
- /* set GFX_MQD_CONTROL */
- WREG32_SOC15(GC, 0, mmCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control);
-
- /* set GFX_HQD_VMID to 0 */
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid);
-
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUEUE_PRIORITY,
- mqd->cp_gfx_hqd_queue_priority);
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum);
-
- /* set GFX_HQD_BASE, similar as CP_RB_BASE */
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base);
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi);
-
- /* set GFX_HQD_RPTR_ADDR, similar as CP_RB_RPTR */
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr);
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi);
-
- /* set GFX_HQD_CNTL, similar as CP_RB_CNTL */
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl);
-
- /* set RB_WPTR_POLL_ADDR */
- WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo);
- WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi);
-
- /* set RB_DOORBELL_CONTROL */
- WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control);
-
- /* active the queue */
- WREG32_SOC15(GC, 0, mmCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active);
-
- return 0;
-}
-#endif
-
static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -6492,59 +6443,23 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
if (ring->doorbell_index == adev->doorbell_index.gfx_ring0 << 1)
gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
-#ifdef BRING_UP_DEBUG
- gfx_v10_0_gfx_queue_init_register(ring);
-#endif
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (amdgpu_in_reset(adev)) {
- /* reset mqd with the backup copy */
+ } else {
+ /* restore mqd with the backup copy */
if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
*ring->wptr_cpu_addr = 0;
amdgpu_ring_clear_ring(ring);
-#ifdef BRING_UP_DEBUG
- mutex_lock(&adev->srbm_mutex);
- nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
- gfx_v10_0_gfx_queue_init_register(ring);
- nv_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-#endif
- } else {
- amdgpu_ring_clear_ring(ring);
}
return 0;
}
-#ifndef BRING_UP_DEBUG
-static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
-{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
- int r, i;
-
- if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
- return -EINVAL;
-
- r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
- adev->gfx.num_gfx_rings);
- if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
- return r;
- }
-
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
-
- return amdgpu_ring_test_helper(kiq_ring);
-}
-#endif
-
static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
int r, i;
@@ -6567,11 +6482,11 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
if (r)
goto done;
}
-#ifndef BRING_UP_DEBUG
- r = gfx_v10_0_kiq_enable_kgq(adev);
+
+ r = amdgpu_gfx_enable_kgq(adev, 0);
if (r)
goto done;
-#endif
+
r = gfx_v10_0_cp_gfx_start(adev);
if (r)
goto done;
@@ -6812,14 +6727,13 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v10_compute_mqd *mqd = ring->mqd_ptr;
- int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
gfx_v10_0_kiq_setting(ring);
if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
- if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ if (adev->gfx.kiq[0].mqd_backup)
+ memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
/* reset ring buffer */
ring->wptr = 0;
@@ -6841,8 +6755,8 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ if (adev->gfx.kiq[0].mqd_backup)
+ memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
}
return 0;
@@ -6864,17 +6778,14 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
- /* reset MQD to a clean status */
+ } else {
+ /* restore MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
-
/* reset ring buffer */
ring->wptr = 0;
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
- } else {
- amdgpu_ring_clear_ring(ring);
}
return 0;
@@ -6885,7 +6796,7 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
int r;
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
@@ -6927,7 +6838,7 @@ static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
goto done;
}
- r = amdgpu_gfx_enable_kcq(adev);
+ r = amdgpu_gfx_enable_kcq(adev, 0);
done:
return r;
}
@@ -7240,47 +7151,20 @@ static int gfx_v10_0_hw_init(void *handle)
return r;
}
-#ifndef BRING_UP_DEBUG
-static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
-{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- int i;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
- adev->gfx.num_gfx_rings))
- return -ENOMEM;
-
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
- PREEMPT_QUEUES, 0, 0);
- if (!adev->job_hang)
- return amdgpu_ring_test_helper(kiq_ring);
- else
- return 0;
-}
-#endif
-
static int gfx_v10_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int r;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
if (!adev->no_hw_access) {
-#ifndef BRING_UP_DEBUG
if (amdgpu_async_gfx_ring) {
- r = gfx_v10_0_kiq_disable_kgq(adev);
- if (r)
+ if (amdgpu_gfx_disable_kgq(adev, 0))
DRM_ERROR("KGQ disable failed\n");
}
-#endif
- if (amdgpu_gfx_disable_kcq(adev))
+
+ if (amdgpu_gfx_disable_kcq(adev, 0))
DRM_ERROR("KCQ disable failed\n");
}
@@ -7572,7 +7456,7 @@ static bool gfx_v10_0_is_rlc_enabled(struct amdgpu_device *adev)
return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
}
-static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
+static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
unsigned i;
@@ -7613,7 +7497,7 @@ static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
}
}
-static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev)
+static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
@@ -7960,7 +7844,7 @@ static void gfx_v10_0_apply_medium_grain_clock_gating_workaround(struct amdgpu_d
static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
{
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (enable) {
/* enable FGCG firstly*/
@@ -7999,7 +7883,7 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
AMD_CG_SUPPORT_GFX_3D_CGLS))
gfx_v10_0_enable_gui_idle_interrupt(adev, enable);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -8093,11 +7977,11 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable)
static void gfx_v10_cntl_pg(struct amdgpu_device *adev, bool enable)
{
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
gfx_v10_cntl_power_gating(adev, enable);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
@@ -8640,7 +8524,7 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
@@ -9148,7 +9032,7 @@ static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
uint32_t tmp, target;
- struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
+ struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);
if (ring->me == 1)
target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
@@ -9192,7 +9076,7 @@ static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
u8 me_id, pipe_id, queue_id;
- struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
+ struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);
me_id = (entry->ring_id & 0x0c) >> 2;
pipe_id = (entry->ring_id & 0x03) >> 0;
@@ -9369,7 +9253,7 @@ static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- adev->gfx.kiq.ring.funcs = &gfx_v10_0_ring_funcs_kiq;
+ adev->gfx.kiq[0].ring.funcs = &gfx_v10_0_ring_funcs_kiq;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v10_0_ring_funcs_gfx;
@@ -9403,8 +9287,8 @@ static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
adev->gfx.eop_irq.funcs = &gfx_v10_0_eop_irq_funcs;
- adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
- adev->gfx.kiq.irq.funcs = &gfx_v10_0_kiq_irq_funcs;
+ adev->gfx.kiq[0].irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
+ adev->gfx.kiq[0].irq.funcs = &gfx_v10_0_kiq_irq_funcs;
adev->gfx.priv_reg_irq.num_types = 1;
adev->gfx.priv_reg_irq.funcs = &gfx_v10_0_priv_reg_irq_funcs;
@@ -9541,7 +9425,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff, 0);
if (i < 4 && j < 2)
gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(
adev, disable_masks[i * 2 + j]);
@@ -9562,7 +9446,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
}
- gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v10_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index a9da0486467a..f09e2558e73b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -112,7 +112,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 instance);
+ u32 sh_num, u32 instance, int xcc_id);
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);
static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
@@ -123,8 +123,8 @@ static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
uint16_t pasid, uint32_t flush_type,
bool all_hub, uint8_t dst_sel);
-static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev);
-static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev);
+static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
+static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
bool enable);
@@ -192,7 +192,7 @@ static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
struct amdgpu_device *adev = kiq_ring->adev;
uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
- if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) {
+ if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
return;
}
@@ -260,7 +260,7 @@ static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
- adev->gfx.kiq.pmf = &gfx_v11_0_kiq_pm4_funcs;
+ adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs;
}
static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
@@ -463,6 +463,23 @@ out:
return err;
}
+static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ if ((adev->gfx.me_fw_version >= 1505) &&
+ (adev->gfx.pfp_fw_version >= 1600) &&
+ (adev->gfx.mec_fw_version >= 512))
+ adev->gfx.cp_gfx_shadow = true;
+ break;
+ default:
+ adev->gfx.cp_gfx_shadow = false;
+ break;
+ }
+}
+
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[40];
@@ -539,6 +556,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
/* only one MEC for gfx 11.0.0. */
adev->gfx.mec2_fw = NULL;
+ gfx_v11_0_check_fw_cp_gfx_shadow(adev);
out:
if (err) {
amdgpu_ucode_release(&adev->gfx.pfp_fw);
@@ -699,7 +717,7 @@ static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
u32 *hpd;
size_t mec_hpd_size;
- bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+ bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
@@ -800,6 +818,27 @@ static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
soc21_grbm_select(adev, me, pipe, q, vm);
}
+/* all sizes are in bytes */
+#define MQD_SHADOW_BASE_SIZE 73728
+#define MQD_SHADOW_BASE_ALIGNMENT 256
+#define MQD_FWWORKAREA_SIZE 484
+#define MQD_FWWORKAREA_ALIGNMENT 256
+
+static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info)
+{
+ if (adev->gfx.cp_gfx_shadow) {
+ shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
+ shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
+ shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
+ shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
+ return 0;
+ } else {
+ memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
+ return -ENOTSUPP;
+ }
+}
+
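These constants travel out through the dev_info fields added earlier, so userspace can size its preemption buffers without hardcoding them. A sketch of laying out the shadow area and the FW work area (CSA) in a single allocation — the packing is illustrative, not a mandated ABI:

#include <stdint.h>
#include <stdio.h>

static uint64_t align_up(uint64_t v, uint64_t a)
{
	return (v + a - 1) & ~(a - 1);
}

int main(void)
{
	/* values as reported on gfx11 parts with shadow-capable firmware */
	uint64_t shadow_size = 73728, csa_size = 484, csa_align = 256;

	/* the BO base itself must honor the 256-byte shadow alignment */
	uint64_t csa_off = align_up(shadow_size, csa_align);

	printf("bo size=%llu bytes, csa at +%llu\n",
	       (unsigned long long)(csa_off + csa_size),
	       (unsigned long long)csa_off);
	return 0;
}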
static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v11_0_select_se_sh,
@@ -808,6 +847,7 @@ static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
+ .get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
};
static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1374,8 +1414,8 @@ static int gfx_v11_0_sw_init(void *handle)
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k,
- j))
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+ k, j))
continue;
r = gfx_v11_0_compute_ring_init(adev, ring_id,
@@ -1389,19 +1429,19 @@ static int gfx_v11_0_sw_init(void *handle)
}
if (!adev->enable_mes_kiq) {
- r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE);
+ r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0);
if (r) {
DRM_ERROR("Failed to init KIQ BOs!\n");
return r;
}
- kiq = &adev->gfx.kiq;
- r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
+ kiq = &adev->gfx.kiq[0];
+ r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
if (r)
return r;
}
- r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd));
+ r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd), 0);
if (r)
return r;
@@ -1463,11 +1503,11 @@ static int gfx_v11_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- amdgpu_gfx_mqd_sw_fini(adev);
+ amdgpu_gfx_mqd_sw_fini(adev, 0);
if (!adev->enable_mes_kiq) {
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
- amdgpu_gfx_kiq_fini(adev);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
+ amdgpu_gfx_kiq_fini(adev, 0);
}
gfx_v11_0_pfp_fini(adev);
@@ -1484,7 +1524,7 @@ static int gfx_v11_0_sw_fini(void *handle)
}
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 instance)
+ u32 sh_num, u32 instance, int xcc_id)
{
u32 data;
@@ -3337,7 +3377,7 @@ static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data);
}
- adev->gfx.kiq.ring.sched.ready = enable;
+ adev->gfx.kiq[0].ring.sched.ready = enable;
udelay(50);
}
@@ -3640,55 +3680,6 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
return 0;
}
-#ifdef BRING_UP_DEBUG
-static int gfx_v11_0_gfx_queue_init_register(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- struct v11_gfx_mqd *mqd = ring->mqd_ptr;
-
- /* set mmCP_GFX_HQD_WPTR/_HI to 0 */
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr);
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi);
-
- /* set GFX_MQD_BASE */
- WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr);
- WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
-
- /* set GFX_MQD_CONTROL */
- WREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control);
-
- /* set GFX_HQD_VMID to 0 */
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid);
-
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY,
- mqd->cp_gfx_hqd_queue_priority);
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum);
-
- /* set GFX_HQD_BASE, similar as CP_RB_BASE */
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base);
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi);
-
- /* set GFX_HQD_RPTR_ADDR, similar as CP_RB_RPTR */
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr);
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi);
-
- /* set GFX_HQD_CNTL, similar as CP_RB_CNTL */
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl);
-
- /* set RB_WPTR_POLL_ADDR */
- WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo);
- WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi);
-
- /* set RB_DOORBELL_CONTROL */
- WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control);
-
- /* active the queue */
- WREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active);
-
- return 0;
-}
-#endif
-
static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3700,59 +3691,23 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
amdgpu_ring_init_mqd(ring);
-#ifdef BRING_UP_DEBUG
- gfx_v11_0_gfx_queue_init_register(ring);
-#endif
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (amdgpu_in_reset(adev)) {
- /* reset mqd with the backup copy */
+ } else {
+ /* restore mqd with the backup copy */
if (adev->gfx.me.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
*ring->wptr_cpu_addr = 0;
amdgpu_ring_clear_ring(ring);
-#ifdef BRING_UP_DEBUG
- mutex_lock(&adev->srbm_mutex);
- soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
- gfx_v11_0_gfx_queue_init_register(ring);
- soc21_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-#endif
- } else {
- amdgpu_ring_clear_ring(ring);
}
return 0;
}
-#ifndef BRING_UP_DEBUG
-static int gfx_v11_0_kiq_enable_kgq(struct amdgpu_device *adev)
-{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
- int r, i;
-
- if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
- return -EINVAL;
-
- r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
- adev->gfx.num_gfx_rings);
- if (r) {
- DRM_ERROR("Failed to lock KIQ (%d).\n", r);
- return r;
- }
-
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
-
- return amdgpu_ring_test_helper(kiq_ring);
-}
-#endif
-
static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
int r, i;
@@ -3775,11 +3730,11 @@ static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
if (r)
goto done;
}
-#ifndef BRING_UP_DEBUG
- r = gfx_v11_0_kiq_enable_kgq(adev);
+
+ r = amdgpu_gfx_enable_kgq(adev, 0);
if (r)
goto done;
-#endif
+
r = gfx_v11_0_cp_gfx_start(adev);
if (r)
goto done;
@@ -4035,14 +3990,13 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v11_compute_mqd *mqd = ring->mqd_ptr;
- int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
gfx_v11_0_kiq_setting(ring);
if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
- if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ if (adev->gfx.kiq[0].mqd_backup)
+ memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
/* reset ring buffer */
ring->wptr = 0;
@@ -4064,8 +4018,8 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
soc21_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ if (adev->gfx.kiq[0].mqd_backup)
+ memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
}
return 0;
@@ -4087,17 +4041,14 @@ static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
- /* reset MQD to a clean status */
+ } else {
+ /* restore MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
-
/* reset ring buffer */
ring->wptr = 0;
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
- } else {
- amdgpu_ring_clear_ring(ring);
}
return 0;
@@ -4108,7 +4059,7 @@ static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
int r;
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
@@ -4153,7 +4104,7 @@ static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
goto done;
}
- r = amdgpu_gfx_enable_kcq(adev);
+ r = amdgpu_gfx_enable_kcq(adev, 0);
done:
return r;
}
@@ -4414,49 +4365,21 @@ static int gfx_v11_0_hw_init(void *handle)
return r;
}
-#ifndef BRING_UP_DEBUG
-static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev)
-{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *kiq_ring = &kiq->ring;
- int i, r = 0;
-
- if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
- return -EINVAL;
-
- if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
- adev->gfx.num_gfx_rings))
- return -ENOMEM;
-
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
- PREEMPT_QUEUES, 0, 0);
-
- if (adev->gfx.kiq.ring.sched.ready)
- r = amdgpu_ring_test_helper(kiq_ring);
-
- return r;
-}
-#endif
-
static int gfx_v11_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int r;
amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
if (!adev->no_hw_access) {
-#ifndef BRING_UP_DEBUG
if (amdgpu_async_gfx_ring) {
- r = gfx_v11_0_kiq_disable_kgq(adev);
- if (r)
+ if (amdgpu_gfx_disable_kgq(adev, 0))
DRM_ERROR("KGQ disable failed\n");
}
-#endif
- if (amdgpu_gfx_disable_kcq(adev))
+
+ if (amdgpu_gfx_disable_kcq(adev, 0))
DRM_ERROR("KCQ disable failed\n");
amdgpu_mes_kiq_hw_fini(adev);
@@ -4533,7 +4456,7 @@ static int gfx_v11_0_soft_reset(void *handle)
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
- gfx_v11_0_set_safe_mode(adev);
+ gfx_v11_0_set_safe_mode(adev, 0);
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
@@ -4633,7 +4556,7 @@ static int gfx_v11_0_soft_reset(void *handle)
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
- gfx_v11_0_unset_safe_mode(adev);
+ gfx_v11_0_unset_safe_mode(adev, 0);
return gfx_v11_0_cp_resume(adev);
}
@@ -4675,24 +4598,27 @@ static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
uint64_t clock;
uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
- amdgpu_gfx_off_ctrl(adev, false);
- mutex_lock(&adev->gfx.gpu_clock_mutex);
if (amdgpu_sriov_vf(adev)) {
+ amdgpu_gfx_off_ctrl(adev, false);
+ mutex_lock(&adev->gfx.gpu_clock_mutex);
clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
if (clock_counter_hi_pre != clock_counter_hi_after)
clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
+ mutex_unlock(&adev->gfx.gpu_clock_mutex);
+ amdgpu_gfx_off_ctrl(adev, true);
} else {
+ preempt_disable();
clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
if (clock_counter_hi_pre != clock_counter_hi_after)
clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
+ preempt_enable();
}
clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
- mutex_unlock(&adev->gfx.gpu_clock_mutex);
- amdgpu_gfx_off_ctrl(adev, true);
+
return clock;
}
@@ -4799,7 +4725,7 @@ static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev)
return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
}
-static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev)
+static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
unsigned i;
@@ -4818,7 +4744,7 @@ static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev)
}
}
-static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev)
+static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
}
@@ -5046,7 +4972,7 @@ static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *ade
static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
{
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
gfx_v11_0_update_coarse_grain_clock_gating(adev, enable);
@@ -5066,7 +4992,7 @@ static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
AMD_CG_SUPPORT_GFX_3D_CGLS))
gfx_v11_0_enable_gui_idle_interrupt(adev, enable);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -5134,11 +5060,11 @@ static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
{
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
gfx_v11_cntl_power_gating(adev, enable);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static int gfx_v11_0_set_powergating_state(void *handle,
@@ -5591,6 +5517,29 @@ static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, 0);
}
+static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring,
+ u64 shadow_va, u64 csa_va,
+ u64 gds_va, bool init_shadow,
+ int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (!adev->gfx.cp_gfx_shadow)
+ return;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_Q_PREEMPTION_MODE, 7));
+ amdgpu_ring_write(ring, lower_32_bits(shadow_va));
+ amdgpu_ring_write(ring, upper_32_bits(shadow_va));
+ amdgpu_ring_write(ring, lower_32_bits(gds_va));
+ amdgpu_ring_write(ring, upper_32_bits(gds_va));
+ amdgpu_ring_write(ring, lower_32_bits(csa_va));
+ amdgpu_ring_write(ring, upper_32_bits(csa_va));
+ amdgpu_ring_write(ring, shadow_va ?
+ PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(vmid) : 0);
+ amdgpu_ring_write(ring, init_shadow ?
+ PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM : 0);
+}
+
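The packet body is nine dwords: the PACKET3 header (a count of 7 covers the eight operands) followed by the three 64-bit addresses, the VMID select and the init flag, matching the 9 added to emit_frame_size below. A toy encoder of that dword stream (header value and exact field encodings simplified):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int encode(uint32_t *p, uint64_t shadow_va, uint64_t csa_va,
		  uint64_t gds_va, bool init_shadow, int vmid)
{
	int n = 0;

	p[n++] = 0;			/* PACKET3(SET_Q_PREEMPTION_MODE, 7), simplified */
	p[n++] = (uint32_t)shadow_va;
	p[n++] = (uint32_t)(shadow_va >> 32);
	p[n++] = (uint32_t)gds_va;
	p[n++] = (uint32_t)(gds_va >> 32);
	p[n++] = (uint32_t)csa_va;
	p[n++] = (uint32_t)(csa_va >> 32);
	p[n++] = shadow_va ? (uint32_t)vmid : 0;	/* IB_VMID field, simplified */
	p[n++] = init_shadow ? 1u : 0u;			/* INIT_SHADOW_MEM, simplified */
	return n;
}

int main(void)
{
	uint32_t pkt[9];

	printf("%d dwords\n", encode(pkt, 0x1000, 0x2000, 0x3000, true, 4));
	return 0;
}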
static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
unsigned ret;
@@ -5622,7 +5571,7 @@ static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
@@ -6120,7 +6069,7 @@ static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
uint32_t tmp, target;
- struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
+ struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);
target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
target += ring->pipe;
@@ -6211,6 +6160,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
.emit_frame_size = /* totally 242 maximum if 16 IBs */
5 + /* COND_EXEC */
+ 9 + /* SET_Q_PREEMPTION_MODE */
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
@@ -6237,6 +6187,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
+ .emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow,
.init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec,
.patch_cond_exec = gfx_v11_0_ring_emit_patch_cond_exec,
.preempt_ib = gfx_v11_0_ring_preempt_ib,
@@ -6317,7 +6268,7 @@ static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- adev->gfx.kiq.ring.funcs = &gfx_v11_0_ring_funcs_kiq;
+ adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;
@@ -6474,7 +6425,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
counter = 0;
- gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
if (i < 8 && j < 2)
gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(
adev, disable_masks[i * 2 + j]);
@@ -6506,7 +6457,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
active_cu_number += counter;
}
}
- gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
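The hunks below repeat one mechanical change across every gfx generation: the GRBM select and RLC safe-mode helpers gain a trailing xcc_id argument so one code path can address a single compute die on partitioned GPUs, and every existing single-die call site passes 0. A minimal sketch of the calling pattern this enables (illustrative only; select_se_sh_all_xcd is a hypothetical helper, not part of this patch):

	/* Sketch, not from this patch: broadcast a GRBM selection to
	 * every XCD. Single-die parts report num_xcd == 1, so this
	 * collapses to the old xcc_id == 0 behaviour.
	 */
	static void select_se_sh_all_xcd(struct amdgpu_device *adev,
					 u32 se, u32 sh, u32 instance)
	{
		int xcc_id;

		for (xcc_id = 0; xcc_id < adev->gfx.num_xcd; xcc_id++)
			adev->gfx.funcs->select_se_sh(adev, se, sh,
						      instance, xcc_id);
	}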
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index c41219e23151..7cb72bf1acdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1285,7 +1285,7 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
}
static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
- u32 sh_num, u32 instance)
+ u32 sh_num, u32 instance, int xcc_id)
{
u32 data;
@@ -1438,12 +1438,12 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
}
/* GRBM_GFX_INDEX has a different offset on SI */
- gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
}
/* GRBM_GFX_INDEX has a different offset on SI */
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
}
static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
@@ -1459,14 +1459,14 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0);
data = gfx_v6_0_get_rb_active_bitmap(adev);
active_rbs |= data <<
((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
@@ -1487,7 +1487,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
/* cache the values for userspace */
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0);
adev->gfx.config.rb_config[i][j].rb_backend_disable =
RREG32(mmCC_RB_BACKEND_DISABLE);
adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
@@ -1496,7 +1496,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
RREG32(mmPA_SC_RASTER_CONFIG);
}
}
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -1535,7 +1535,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0);
data = RREG32(mmSPI_STATIC_THREAD_MGMT_3);
active_cu = gfx_v6_0_get_cu_enabled(adev);
@@ -1550,7 +1550,7 @@ static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
}
}
}
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -2391,7 +2391,7 @@ static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
WREG32_FIELD(RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
if (!enable) {
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmSPI_LB_CU_MASK, 0x00ff);
}
}
@@ -3073,7 +3073,7 @@ static int gfx_v6_0_sw_init(void *handle)
ring = &adev->gfx.gfx_ring[i];
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
- r = amdgpu_ring_init(adev, ring, 1024,
+ r = amdgpu_ring_init(adev, ring, 2048,
&adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
AMDGPU_RING_PRIO_DEFAULT, NULL);
@@ -3571,7 +3571,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff, 0);
if (i < 4 && j < 2)
gfx_v6_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
@@ -3593,7 +3593,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
}
}
- gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 9d5c1e29b4a3..d56dda5fc588 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1552,7 +1552,8 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
* Select which SE, SH combinations to address.
*/
static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
- u32 se_num, u32 sh_num, u32 instance)
+ u32 se_num, u32 sh_num, u32 instance,
+ int xcc_id)
{
u32 data;
@@ -1732,13 +1733,13 @@ gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
}
/* GRBM_GFX_INDEX has a different offset on CI+ */
- gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
}
/* GRBM_GFX_INDEX has a different offset on CI+ */
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
}
/**
@@ -1761,13 +1762,13 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
data = gfx_v7_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
@@ -1790,7 +1791,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
/* cache the values for userspace */
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
adev->gfx.config.rb_config[i][j].rb_backend_disable =
RREG32(mmCC_RB_BACKEND_DISABLE);
adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
@@ -1801,7 +1802,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
RREG32(mmPA_SC_RASTER_CONFIG_1);
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -1911,7 +1912,7 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
* making sure that the following register writes will be broadcasted
* to all the shaders
*/
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
@@ -2728,7 +2729,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
u32 *hpd;
size_t mec_hpd_size;
- bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+ bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
@@ -3301,7 +3302,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
@@ -3309,7 +3310,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3361,7 +3362,7 @@ static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
return true;
}
-static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
u32 tmp, i, mask;
@@ -3383,7 +3384,7 @@ static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
}
}
-static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
u32 tmp;
@@ -3474,7 +3475,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
WREG32(mmRLC_LB_PARAMS, 0x00600408);
WREG32(mmRLC_LB_CNTL, 0x80000004);
@@ -3530,7 +3531,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3584,7 +3585,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3635,7 +3636,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
tmp = gfx_v7_0_halt_rlc(adev);
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
@@ -4456,7 +4457,8 @@ static int gfx_v7_0_sw_init(void *handle)
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+ k, j))
continue;
r = gfx_v7_0_compute_ring_init(adev,
@@ -5114,7 +5116,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
if (i < 4 && j < 2)
gfx_v7_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
@@ -5135,7 +5137,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
}
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b1f2684d854a..2ae7f167985f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1304,7 +1304,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
u32 *hpd;
size_t mec_hpd_size;
- bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+ bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
@@ -2001,7 +2001,8 @@ static int gfx_v8_0_sw_init(void *handle)
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+ k, j))
continue;
r = gfx_v8_0_compute_ring_init(adev,
@@ -2015,19 +2016,19 @@ static int gfx_v8_0_sw_init(void *handle)
}
}
- r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE);
+ r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE, 0);
if (r) {
DRM_ERROR("Failed to init KIQ BOs!\n");
return r;
}
- kiq = &adev->gfx.kiq;
- r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
+ kiq = &adev->gfx.kiq[0];
+ r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
if (r)
return r;
/* create MQD for all compute queues as well as KIQ for SRIOV case */
- r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation));
+ r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation), 0);
if (r)
return r;
@@ -2050,9 +2051,9 @@ static int gfx_v8_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- amdgpu_gfx_mqd_sw_fini(adev);
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
- amdgpu_gfx_kiq_fini(adev);
+ amdgpu_gfx_mqd_sw_fini(adev, 0);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
+ amdgpu_gfx_kiq_fini(adev, 0);
gfx_v8_0_mec_fini(adev);
amdgpu_gfx_rlc_fini(adev);
@@ -3394,7 +3395,8 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
}
static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
- u32 se_num, u32 sh_num, u32 instance)
+ u32 se_num, u32 sh_num, u32 instance,
+ int xcc_id)
{
u32 data;
@@ -3578,13 +3580,13 @@ gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
}
/* GRBM_GFX_INDEX has a different offset on VI */
- gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
}
/* GRBM_GFX_INDEX has a different offset on VI */
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
}
static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
@@ -3600,13 +3602,13 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
data = gfx_v8_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
@@ -3629,7 +3631,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
/* cache the values for userspace */
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
adev->gfx.config.rb_config[i][j].rb_backend_disable =
RREG32(mmCC_RB_BACKEND_DISABLE);
adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
@@ -3640,7 +3642,7 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
RREG32(mmPA_SC_RASTER_CONFIG_1);
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -3787,7 +3789,7 @@ static void gfx_v8_0_constants_init(struct amdgpu_device *adev)
* making sure that the following register writes will be broadcasted
* to all the shaders
*/
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmPA_SC_FIFO_SIZE,
(adev->gfx.config.sc_prim_fifo_size_frontend <<
@@ -3818,7 +3820,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
@@ -3826,7 +3828,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
if (k == adev->usec_timeout) {
gfx_v8_0_select_se_sh(adev, 0xffffffff,
- 0xffffffff, 0xffffffff);
+ 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
i, j);
@@ -3834,7 +3836,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -4292,7 +4294,7 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
WREG32(mmCP_MEC_CNTL, 0);
} else {
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- adev->gfx.kiq.ring.sched.ready = false;
+ adev->gfx.kiq[0].ring.sched.ready = false;
}
udelay(50);
}
@@ -4314,12 +4316,12 @@ static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
{
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
uint64_t queue_mask = 0;
int r, i;
for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
- if (!test_bit(i, adev->gfx.mec.queue_bitmap))
+ if (!test_bit(i, adev->gfx.mec_bitmap[0].queue_bitmap))
continue;
/* This situation may be hit in the future if a new HW
@@ -4595,14 +4597,13 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct vi_mqd *mqd = ring->mqd_ptr;
- int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
gfx_v8_0_kiq_setting(ring);
if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
/* reset MQD to a clean status */
- if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
+ if (adev->gfx.kiq[0].mqd_backup)
+ memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct vi_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
@@ -4625,8 +4626,8 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
+ if (adev->gfx.kiq[0].mqd_backup)
+ memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct vi_mqd_allocation));
}
return 0;
@@ -4650,15 +4651,13 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
- } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
- /* reset MQD to a clean status */
+ } else {
+ /* restore MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
amdgpu_ring_clear_ring(ring);
- } else {
- amdgpu_ring_clear_ring(ring);
}
return 0;
}
@@ -4678,15 +4677,17 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
int r;
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
- if (unlikely(r != 0))
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(ring->mqd_obj);
return r;
+ }
gfx_v8_0_kiq_init_queue(ring);
amdgpu_bo_kunmap(ring->mqd_obj);
@@ -4741,7 +4742,7 @@ static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
if (r)
return r;
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
@@ -4808,7 +4809,7 @@ static int gfx_v8_0_hw_init(void *handle)
static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
{
int r, i;
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
if (r)
@@ -4902,7 +4903,7 @@ static int gfx_v8_0_hw_fini(void *handle)
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
}
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (!gfx_v8_0_wait_for_idle(adev))
gfx_v8_0_cp_enable(adev, false);
else
@@ -4911,7 +4912,7 @@ static int gfx_v8_0_hw_fini(void *handle)
adev->gfx.rlc.funcs->stop(adev);
else
pr_err("rlc is busy, skip halt rlc\n");
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -5376,7 +5377,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GFX_DMG))
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
@@ -5430,7 +5431,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GFX_DMG))
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -5481,7 +5482,7 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
{
uint32_t data;
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
@@ -5535,7 +5536,7 @@ static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
return true;
}
-static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
+static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
unsigned i;
@@ -5562,7 +5563,7 @@ static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
}
}
-static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
+static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
unsigned i;
@@ -5621,7 +5622,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t temp, data;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
@@ -5717,7 +5718,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
gfx_v8_0_wait_for_rlc_serdes(adev);
}
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5727,7 +5728,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
@@ -5810,7 +5811,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
gfx_v8_0_wait_for_rlc_serdes(adev);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
@@ -6723,11 +6724,11 @@ static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data,
*/
if (from_wq) {
mutex_lock(&adev->grbm_idx_mutex);
- gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id);
+ gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id, 0);
sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -7001,7 +7002,7 @@ static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- adev->gfx.kiq.ring.funcs = &gfx_v8_0_ring_funcs_kiq;
+ adev->gfx.kiq[0].ring.funcs = &gfx_v8_0_ring_funcs_kiq;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;
@@ -7116,7 +7117,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
mask = 1;
ao_bitmap = 0;
counter = 0;
- gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
if (i < 4 && j < 2)
gfx_v8_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
@@ -7137,7 +7138,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
}
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
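A second recurring change in these hunks: adev->gfx.kiq and the MEC queue bitmap become per-XCD arrays (kiq[0], mec_bitmap[0]), and the KIQ MQD backup moves off the magic AMDGPU_MAX_COMPUTE_RINGS slot of the MEC backup array onto the KIQ instance itself. A hedged sketch of the lookup this array layout allows (hypothetical helper, not in the patch):

	/* Sketch (assumption): fetch the KIQ ring serving a given XCD.
	 * Single-instance ASICs, as in the hunks above, use index 0.
	 */
	static struct amdgpu_ring *kiq_ring_for_xcd(struct amdgpu_device *adev,
						    int xcc_id)
	{
		return &adev->gfx.kiq[xcc_id].ring;
	}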
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index adbcd8127c82..4a7556099469 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -898,7 +898,7 @@ static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
- adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
+ adev->gfx.kiq[0].pmf = &gfx_v9_0_kiq_pm4_funcs;
}
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
@@ -1504,7 +1504,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
mask = 1;
cu_bitmap = 0;
counter = 0;
- amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
if (cu_info->bitmap[i][j] & mask) {
@@ -1523,7 +1523,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
}
}
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -1545,7 +1545,7 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
/* set mmRLC_LB_PARAMS = 0x003F_1006 */
@@ -1594,7 +1594,7 @@ static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
/* set mmRLC_LB_PARAMS = 0x003F_1006 */
@@ -1713,7 +1713,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
const struct gfx_firmware_header_v1_0 *mec_hdr;
- bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+ bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
/* take ownership of the relevant compute queues */
amdgpu_gfx_compute_queue_acquire(adev);
@@ -1831,7 +1831,7 @@ static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 q, u32 vm)
{
- soc15_grbm_select(adev, me, pipe, q, vm);
+ soc15_grbm_select(adev, me, pipe, q, vm, 0);
}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
@@ -2154,7 +2154,8 @@ static int gfx_v9_0_sw_init(void *handle)
for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
- if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
+ if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+ k, j))
continue;
r = gfx_v9_0_compute_ring_init(adev,
@@ -2168,19 +2169,19 @@ static int gfx_v9_0_sw_init(void *handle)
}
}
- r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
+ r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
if (r) {
DRM_ERROR("Failed to init KIQ BOs!\n");
return r;
}
- kiq = &adev->gfx.kiq;
- r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
+ kiq = &adev->gfx.kiq[0];
+ r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
if (r)
return r;
/* create MQD for all compute queues as well as KIQ for SRIOV case */
- r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
+ r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation), 0);
if (r)
return r;
@@ -2215,9 +2216,9 @@ static int gfx_v9_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- amdgpu_gfx_mqd_sw_fini(adev);
- amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
- amdgpu_gfx_kiq_fini(adev);
+ amdgpu_gfx_mqd_sw_fini(adev, 0);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
+ amdgpu_gfx_kiq_fini(adev, 0);
gfx_v9_0_mec_fini(adev);
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
@@ -2240,7 +2241,7 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
}
void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
- u32 instance)
+ u32 instance, int xcc_id)
{
u32 data;
@@ -2289,13 +2290,13 @@ static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
data = gfx_v9_0_get_rb_active_bitmap(adev);
active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
rb_bitmap_width_per_sh);
}
}
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
@@ -2323,12 +2324,12 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
mutex_lock(&adev->srbm_mutex);
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
- soc15_grbm_select(adev, 0, 0, 0, i);
+ soc15_grbm_select(adev, 0, 0, 0, i, 0);
/* CP and shaders */
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
}
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
/* Initialize all compute VMIDs to have no GDS, GWS, or OA
@@ -2393,7 +2394,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
/* where to put LDS, scratch, GPUVM in FSA64 space */
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
- soc15_grbm_select(adev, 0, 0, 0, i);
+ soc15_grbm_select(adev, 0, 0, 0, i, 0);
/* CP and shaders */
if (i == 0) {
tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
@@ -2415,7 +2416,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
}
}
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
@@ -2432,7 +2433,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
@@ -2440,7 +2441,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
if (k == adev->usec_timeout) {
amdgpu_gfx_select_se_sh(adev, 0xffffffff,
- 0xffffffff, 0xffffffff);
+ 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
i, j);
@@ -2448,7 +2449,7 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
}
}
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3155,7 +3156,7 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
} else {
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- adev->gfx.kiq.ring.sched.ready = false;
+ adev->gfx.kiq[0].ring.sched.ready = false;
}
udelay(50);
}
@@ -3519,7 +3520,6 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
- int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
struct v9_mqd *tmp_mqd;
gfx_v9_0_kiq_setting(ring);
@@ -3529,20 +3529,20 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
* driver need to re-init the mqd.
* check mqd->cp_hqd_pq_control since this value should not be 0
*/
- tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
+ tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[0].mqd_backup;
if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control){
/* for GPU_RESET case , reset MQD to a clean status */
- if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
+ if (adev->gfx.kiq[0].mqd_backup)
+ memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct v9_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
amdgpu_ring_clear_ring(ring);
mutex_lock(&adev->srbm_mutex);
- soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
gfx_v9_0_kiq_init_register(ring);
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
} else {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
@@ -3551,14 +3551,14 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
if (amdgpu_sriov_vf(adev) && adev->in_suspend)
amdgpu_ring_clear_ring(ring);
mutex_lock(&adev->srbm_mutex);
- soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
gfx_v9_0_mqd_init(ring);
gfx_v9_0_kiq_init_register(ring);
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
+ if (adev->gfx.kiq[0].mqd_backup)
+ memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
}
return 0;
@@ -3582,24 +3582,21 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
- soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
gfx_v9_0_mqd_init(ring);
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
- } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
- /* reset MQD to a clean status */
+ } else {
+ /* restore MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
-
/* reset ring buffer */
ring->wptr = 0;
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
- } else {
- amdgpu_ring_clear_ring(ring);
}
return 0;
@@ -3610,7 +3607,7 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
int r;
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
@@ -3652,7 +3649,7 @@ static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
goto done;
}
- r = amdgpu_gfx_enable_kcq(adev);
+ r = amdgpu_gfx_enable_kcq(adev, 0);
done:
return r;
}
@@ -3771,7 +3768,7 @@ static int gfx_v9_0_hw_fini(void *handle)
/* DF freeze and kcq disable will fail */
if (!amdgpu_ras_intr_triggered())
/* disable KCQ to avoid CPC touch memory not valid anymore */
- amdgpu_gfx_disable_kcq(adev);
+ amdgpu_gfx_disable_kcq(adev, 0);
if (amdgpu_sriov_vf(adev)) {
gfx_v9_0_cp_gfx_enable(adev, false);
@@ -3789,11 +3786,11 @@ static int gfx_v9_0_hw_fini(void *handle)
*/
if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
mutex_lock(&adev->srbm_mutex);
- soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
- adev->gfx.kiq.ring.pipe,
- adev->gfx.kiq.ring.queue, 0);
- gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me,
+ adev->gfx.kiq[0].ring.pipe,
+ adev->gfx.kiq[0].ring.queue, 0, 0);
+ gfx_v9_0_kiq_fini_register(&adev->gfx.kiq[0].ring);
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
@@ -3913,7 +3910,7 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
unsigned long flags;
uint32_t seq, reg_val_offs = 0;
uint64_t value = 0;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_rreg);
@@ -4543,6 +4540,7 @@ static int gfx_v9_0_early_init(void *handle)
adev->gfx.num_gfx_rings = 0;
else
adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
+ adev->gfx.num_xcd = 1;
adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
AMDGPU_MAX_COMPUTE_RINGS);
gfx_v9_0_set_kiq_pm4_funcs(adev);
@@ -4623,7 +4621,7 @@ static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
return true;
}
-static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
+static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
unsigned i;
@@ -4640,7 +4638,7 @@ static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
}
}
-static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
+static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
@@ -4651,7 +4649,7 @@ static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
@@ -4663,7 +4661,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
}
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
@@ -4690,7 +4688,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t data, def;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
@@ -4757,7 +4755,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
}
}
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
@@ -4768,7 +4766,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
if (!adev->gfx.num_gfx_rings)
return;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
/* Enable 3D CGCG/CGLS */
if (enable) {
@@ -4812,7 +4810,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
}
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -4820,7 +4818,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t def, data;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
@@ -4864,7 +4862,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
}
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
@@ -5385,7 +5383,7 @@ static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
@@ -6608,7 +6606,7 @@ static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
- amdgpu_gfx_select_se_sh(adev, j, 0x0, k);
+ amdgpu_gfx_select_se_sh(adev, j, 0x0, k, 0);
RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
}
}
@@ -6670,7 +6668,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
- amdgpu_gfx_select_se_sh(adev, j, 0, k);
+ amdgpu_gfx_select_se_sh(adev, j, 0, k, 0);
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
if (reg_value)
@@ -6685,7 +6683,7 @@ static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
err_data->ce_count += sec_count;
err_data->ue_count += ded_count;
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
gfx_v9_0_query_utc_edc_status(adev, err_data);
@@ -6964,7 +6962,7 @@ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
+ adev->gfx.kiq[0].ring.funcs = &gfx_v9_0_ring_funcs_kiq;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
@@ -7145,7 +7143,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
mask = 1;
ao_bitmap = 0;
counter = 0;
- amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
gfx_v9_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
@@ -7178,7 +7176,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
}
}
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
index dfe8d4841f58..f9f6edc5e558 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
@@ -27,6 +27,6 @@
extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block;
void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
- u32 instance);
+ u32 instance, int xcc_id);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index 3a797424579c..d648a29c33e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -761,7 +761,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
for (i = first_vmid; i < last_vmid; i++) {
data = 0;
- soc15_grbm_select(adev, 0, 0, 0, i);
+ soc15_grbm_select(adev, 0, 0, 0, i, 0);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE,
@@ -769,7 +769,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL), data);
}
- soc15_grbm_select(adev, 0, 0, 0, 0);
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
@@ -777,7 +777,7 @@ void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev)
{
u32 tmp;
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
tmp = 0;
tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL, PATTERN_MODE, 1);
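The new gfx_v9_4_3.c KIQ helpers that follow each advertise a packet length in kiq_pm4_funcs; those sizes can be checked against the dwords the emitters actually write (one header write plus the payload writes). A tally derived from the hunks below:

	/* Dword budget per KIQ packet, PACKET3 header included:
	 *   set_resources:   1 + 7 = 8  -> .set_resources_size = 8
	 *   map_queues:      1 + 6 = 7  -> .map_queues_size = 7
	 *   unmap_queues:    1 + 5 = 6  -> .unmap_queues_size = 6
	 *   query_status:    1 + 6 = 7  -> .query_status_size = 7
	 *   invalidate_tlbs: 1 + 1 = 2  -> .invalidate_tlbs_size = 2
	 */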
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 5f8500577c02..c9ae3065a13d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -25,16 +25,294 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
+#include "soc15d.h"
#include "soc15_common.h"
#include "vega10_enum.h"
+#include "clearstate_gfx9.h"
+#include "v9_structs.h"
+
+#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
+
#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"
#include "gfx_v9_4_3.h"
+MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
+MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
+
+#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+static const struct soc15_reg_golden golden_settings_gc_9_4_3[] = {
+
+};
+
+static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
+static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
+static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
+static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
+static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
+ struct amdgpu_cu_info *cu_info);
+
+static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
+ uint64_t queue_mask)
+{
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_SET_RESOURCES_VMID_MASK(0) |
+ /* vmid_mask:0 queue_type:0 (KIQ) */
+ PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
+ amdgpu_ring_write(kiq_ring,
+ lower_32_bits(queue_mask)); /* queue mask lo */
+ amdgpu_ring_write(kiq_ring,
+ upper_32_bits(queue_mask)); /* queue mask hi */
+ amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
+ amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
+ amdgpu_ring_write(kiq_ring, 0); /* oac mask */
+ amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
+}
+
+static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
+ PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+ PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
+ PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
+ PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
+ PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
+ /*queue_type: normal compute queue */
+ PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
+ /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
+ PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
+ /* num_queues: must be 1 */
+ PACKET3_MAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
+}
+
+static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring,
+ enum amdgpu_unmap_queues_action action,
+ u64 gpu_addr, u64 seq)
+{
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_UNMAP_QUEUES_ACTION(action) |
+ PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
+ PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+
+ if (action == PREEMPT_QUEUES_NO_UNMAP) {
+ amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
+ amdgpu_ring_write(kiq_ring, seq);
+ } else {
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ }
+}
+
+static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring,
+ u64 addr,
+ u64 seq)
+{
+ uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
+ PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
+ PACKET3_QUERY_STATUS_COMMAND(2));
+ /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
+ PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
+ amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
+ amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
+}
+
+static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub)
+{
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
+ PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
+ PACKET3_INVALIDATE_TLBS_PASID(pasid) |
+ PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
+}
+
+static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
+ .kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
+ .kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
+ .kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
+ .kiq_query_status = gfx_v9_4_3_kiq_query_status,
+ .kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
+ .set_resources_size = 8,
+ .map_queues_size = 7,
+ .unmap_queues_size = 6,
+ .query_status_size = 7,
+ .invalidate_tlbs_size = 2,
+};
+
+static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
+{
+ int i;
+ for (i = 0; i < adev->gfx.num_xcd; i++)
+ adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
+}
+
+static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
+{
+
+}
+
+static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
+ bool wc, uint32_t reg, uint32_t val)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
+ WRITE_DATA_DST_SEL(0) |
+ (wc ? WR_CONFIRM : 0));
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, val);
+}
+
+static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
+ int mem_space, int opt, uint32_t addr0,
+ uint32_t addr1, uint32_t ref, uint32_t mask,
+ uint32_t inv)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring,
+ /* memory (1) or register (0) */
+ (WAIT_REG_MEM_MEM_SPACE(mem_space) |
+ WAIT_REG_MEM_OPERATION(opt) | /* wait */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(eng_sel)));
+
+ if (mem_space)
+ BUG_ON(addr0 & 0x3); /* Dword align */
+ amdgpu_ring_write(ring, addr0);
+ amdgpu_ring_write(ring, addr1);
+ amdgpu_ring_write(ring, ref);
+ amdgpu_ring_write(ring, mask);
+ amdgpu_ring_write(ring, inv); /* poll interval */
+}
+
+static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ WREG32_SOC15(GC, 0, regSCRATCH_REG0, 0xCAFEDEAD);
+ r = amdgpu_ring_alloc(ring, 3);
+ if (r)
+ return r;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+ amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0) -
+ PACKET3_SET_UCONFIG_REG_START);
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32_SOC15(GC, 0, regSCRATCH_REG0);
+ if (tmp == 0xDEADBEEF)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ return r;
+}
+
+static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ib ib;
+ struct dma_fence *f = NULL;
+
+ unsigned index;
+ uint64_t gpu_addr;
+ uint32_t tmp;
+ long r;
+
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
+ return r;
+
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
+ memset(&ib, 0, sizeof(ib));
+ r = amdgpu_ib_get(adev, NULL, 16,
+ AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r)
+ goto err1;
+
+ ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
+ ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+ ib.ptr[2] = lower_32_bits(gpu_addr);
+ ib.ptr[3] = upper_32_bits(gpu_addr);
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.length_dw = 5;
+
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ if (r)
+ goto err2;
+
+ r = dma_fence_wait_timeout(f, false, timeout);
+ if (r == 0) {
+ r = -ETIMEDOUT;
+ goto err2;
+ } else if (r < 0) {
+ goto err2;
+ }
+
+ tmp = adev->wb.wb[index];
+ if (tmp == 0xDEADBEEF)
+ r = 0;
+ else
+ r = -EINVAL;
+
+err2:
+ amdgpu_ib_free(adev, &ib, NULL);
+ dma_fence_put(f);
+err1:
+ amdgpu_device_wb_free(adev, index);
+ return r;
+}
+
+
+/* This value might differ per partition */
static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
@@ -50,10 +328,248 @@ static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
return clock;
}
+static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
+{
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
+
+ kfree(adev->gfx.rlc.register_list_format);
+}
+
+static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
+ const char *chip_name)
+{
+ char fw_name[30];
+ int err;
+ const struct rlc_firmware_header_v2_0 *rlc_hdr;
+ uint16_t version_major;
+ uint16_t version_minor;
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
+ if (err)
+ goto out;
+ rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+
+ version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
+ version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
+ err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
+out:
+ if (err)
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+
+ return err;
+}
+
+static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
+{
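+	/* always true for now: gfxoff stays disabled on this IP; the pdev
+	 * argument leaves room for per-device exceptions later */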
+ return true;
+}
+
+static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
+{
+ if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+}
+
+static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
+ const char *chip_name)
+{
+ char fw_name[30];
+ int err;
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
+ if (err)
+ goto out;
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
+
+ adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
+ adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
+
+ gfx_v9_4_3_check_if_need_gfxoff(adev);
+
+out:
+ if (err)
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ return err;
+}
+
+static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
+{
+ const char *chip_name;
+ int r;
+
+ chip_name = "gc_9_4_3";
+
+ r = gfx_v9_4_3_init_rlc_microcode(adev, chip_name);
+ if (r)
+ return r;
+
+ r = gfx_v9_4_3_init_cp_compute_microcode(adev, chip_name);
+ if (r)
+ return r;
+
+ return r;
+}
+
+static u32 gfx_v9_4_3_get_csb_size(struct amdgpu_device *adev)
+{
+ u32 count = 0;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ /* begin clear state */
+ count += 2;
+ /* context control state */
+ count += 3;
+
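+	/* each SECT_CONTEXT extent costs a two-dword SET_CONTEXT_REG header
+	 * plus one dword per register; any other section type is unexpected
+	 * here */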
+ for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT)
+ count += 2 + ext->reg_count;
+ else
+ return 0;
+ }
+ }
+
+ /* end clear state */
+ count += 2;
+ /* clear state */
+ count += 2;
+
+ return count;
+}
+
+static void gfx_v9_4_3_get_csb_buffer(struct amdgpu_device *adev,
+ volatile u32 *buffer)
+{
+ u32 count = 0, i;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (adev->gfx.rlc.cs_data == NULL)
+ return;
+ if (buffer == NULL)
+ return;
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ buffer[count++] = cpu_to_le32(0x80000000);
+ buffer[count++] = cpu_to_le32(0x80000000);
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] =
+ cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ buffer[count++] = cpu_to_le32(ext->reg_index -
+ PACKET3_SET_CONTEXT_REG_START);
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = cpu_to_le32(ext->extent[i]);
+ } else {
+ return;
+ }
+ }
+ }
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+ buffer[count++] = cpu_to_le32(0);
+}
+
+static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
+}
+
+static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
+{
+ int r, i;
+ u32 *hpd;
+ const __le32 *fw_data;
+ unsigned fw_size;
+ u32 *fw;
+ size_t mec_hpd_size;
+
+ const struct gfx_firmware_header_v1_0 *mec_hdr;
+
+ for (i = 0; i < adev->gfx.num_xcd; i++)
+ bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
+ AMDGPU_MAX_COMPUTE_QUEUES);
+
+ /* take ownership of the relevant compute queues */
+ amdgpu_gfx_compute_queue_acquire(adev);
+ mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
+ if (mec_hpd_size) {
+ r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.mec.hpd_eop_obj,
+ &adev->gfx.mec.hpd_eop_gpu_addr,
+ (void **)&hpd);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
+ gfx_v9_4_3_mec_fini(adev);
+ return r;
+ }
+
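+		/* on the emulator, backing-store writes are very slow; clear
+		 * the EOP buffer a dword at a time and sleep periodically so
+		 * the clear doesn't monopolize the CPU */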
+ if (amdgpu_emu_mode == 1) {
+ for (i = 0; i < mec_hpd_size / 4; i++) {
+ memset((void *)(hpd + i), 0, 4);
+ if (i % 50 == 0)
+ msleep(1);
+ }
+ } else {
+ memset(hpd, 0, mec_hpd_size);
+ }
+
+ amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
+ amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
+ }
+
+ mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+
+ fw_data = (const __le32 *)
+ (adev->gfx.mec_fw->data +
+ le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
+
+ r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.mec.mec_fw_obj,
+ &adev->gfx.mec.mec_fw_gpu_addr,
+ (void **)&fw);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
+ gfx_v9_4_3_mec_fini(adev);
+ return r;
+ }
+
+ memcpy(fw, fw_data, fw_size);
+
+ amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
+ amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
+
+ return 0;
+}
+
static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev,
u32 se_num,
u32 sh_num,
- u32 instance)
+ u32 instance,
+ int xcc_id)
{
u32 data;
@@ -76,7 +592,7 @@ static void gfx_v9_4_3_select_se_sh(struct amdgpu_device *adev,
else
data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
- WREG32_SOC15_RLC_SHADOW_EX(reg, GC, 0, regGRBM_GFX_INDEX, data);
+ WREG32_SOC15_RLC_SHADOW_EX(reg, GC, xcc_id, regGRBM_GFX_INDEX, data);
}
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
@@ -147,7 +663,425 @@ static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd
static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 q, u32 vm)
{
- soc15_grbm_select(adev, me, pipe, q, vm);
+ soc15_grbm_select(adev, me, pipe, q, vm, 0);
+}
+
+static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v9_4_3_select_se_sh,
+ .read_wave_data = &gfx_v9_4_3_read_wave_data,
+ .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
+ .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
+ .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
+};
+
+static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
+{
+ u32 gb_addr_config;
+
+ adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
+
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 4, 3):
+ adev->gfx.config.max_hw_contexts = 8;
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+ gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ adev->gfx.config.gb_addr_config = gb_addr_config;
+
+ adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
+ REG_GET_FIELD(
+ adev->gfx.config.gb_addr_config,
+ GB_ADDR_CONFIG,
+ NUM_PIPES);
+
+ adev->gfx.config.max_tile_pipes =
+ adev->gfx.config.gb_addr_config_fields.num_pipes;
+
+ adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
+ REG_GET_FIELD(
+ adev->gfx.config.gb_addr_config,
+ GB_ADDR_CONFIG,
+ NUM_BANKS);
+ adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
+ REG_GET_FIELD(
+ adev->gfx.config.gb_addr_config,
+ GB_ADDR_CONFIG,
+ MAX_COMPRESSED_FRAGS);
+ adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
+ REG_GET_FIELD(
+ adev->gfx.config.gb_addr_config,
+ GB_ADDR_CONFIG,
+ NUM_RB_PER_SE);
+ adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
+ REG_GET_FIELD(
+ adev->gfx.config.gb_addr_config,
+ GB_ADDR_CONFIG,
+ NUM_SHADER_ENGINES);
+ adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
+ REG_GET_FIELD(
+ adev->gfx.config.gb_addr_config,
+ GB_ADDR_CONFIG,
+ PIPE_INTERLEAVE_SIZE));
+
+ return 0;
+}
+
+static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
+ int xcc_id, int mec, int pipe, int queue)
+{
+ unsigned irq_type;
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+ unsigned int hw_prio;
+
+ ring = &adev->gfx.compute_ring[ring_id];
+
+ /* mec0 is me1 */
+ ring->xcc_id = xcc_id;
+ ring->me = mec + 1;
+ ring->pipe = pipe;
+ ring->queue = queue;
+
+ ring->ring_obj = NULL;
+ ring->use_doorbell = true;
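+	/* doorbell_index values count 64-bit doorbell slots; the << 1
+	 * converts them to the dword index the doorbell write uses */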
+ if (xcc_id >= 1)
+ ring->doorbell_index =
+ (adev->doorbell_index.xcc1_mec_ring0_start +
+ ring_id - adev->gfx.num_compute_rings) << 1;
+ else
+ ring->doorbell_index =
+ (adev->doorbell_index.mec_ring0 + ring_id) << 1;
+ ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ + (ring_id * GFX9_MEC_HPD_SIZE);
+ ring->vm_hub = AMDGPU_GFXHUB_0;
+ sprintf(ring->name, "comp_%d.%d.%d.%d",
+ ring->xcc_id, ring->me, ring->pipe, ring->queue);
+
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ + ring->pipe;
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ /* type-2 packets are deprecated on MEC, use type-3 instead */
+ return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+ hw_prio, NULL);
+}
+
+static int gfx_v9_4_3_sw_init(void *handle)
+{
+ int i, j, k, r, ring_id, xcc_id;
+ struct amdgpu_kiq *kiq;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->gfx.mec.num_mec = 2;
+ adev->gfx.mec.num_pipe_per_mec = 4;
+ adev->gfx.mec.num_queue_per_pipe = 8;
+
+ /* EOP Event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
+ if (r)
+ return r;
+
+ /* Privileged reg */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
+ &adev->gfx.priv_reg_irq);
+ if (r)
+ return r;
+
+ /* Privileged inst */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
+ &adev->gfx.priv_inst_irq);
+ if (r)
+ return r;
+
+ adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
+
+ r = adev->gfx.rlc.funcs->init(adev);
+ if (r) {
+ DRM_ERROR("Failed to init rlc BOs!\n");
+ return r;
+ }
+
+ r = gfx_v9_4_3_mec_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to init MEC BOs!\n");
+ return r;
+ }
+
+ /* set up the compute queues - allocate horizontally across pipes */
+ ring_id = 0;
+ for (xcc_id = 0; xcc_id < adev->gfx.num_xcd; xcc_id++) {
+
+ for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
+ for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
+ for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
+ k++) {
+ if (!amdgpu_gfx_is_mec_queue_enabled(
+ adev, xcc_id, i, k, j))
+ continue;
+
+ r = gfx_v9_4_3_compute_ring_init(adev,
+ ring_id,
+ xcc_id,
+ i, k, j);
+ if (r)
+ return r;
+
+ ring_id++;
+ }
+ }
+ }
+
+ r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
+ if (r) {
+ DRM_ERROR("Failed to init KIQ BOs!\n");
+ return r;
+ }
+
+ kiq = &adev->gfx.kiq[xcc_id];
+ r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, xcc_id);
+ if (r)
+ return r;
+
+ /* create MQD for all compute queues as well as KIQ for SRIOV case */
+ r = amdgpu_gfx_mqd_sw_init(adev,
+ sizeof(struct v9_mqd_allocation), xcc_id);
+ if (r)
+ return r;
+ }
+
+ r = gfx_v9_4_3_gpu_early_init(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int gfx_v9_4_3_sw_fini(void *handle)
+{
+ int i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->gfx.num_compute_rings *
+ adev->gfx.num_xcd; i++)
+ amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ amdgpu_gfx_mqd_sw_fini(adev, i);
+ amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
+ amdgpu_gfx_kiq_fini(adev, i);
+ }
+
+ gfx_v9_4_3_mec_fini(adev);
+ amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
+ gfx_v9_4_3_free_microcode(adev);
+
+ return 0;
+}
+
+static u32 gfx_v9_4_3_get_rb_active_bitmap(struct amdgpu_device *adev)
+{
+ u32 data, mask;
+
+ data = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
+ data |= RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
+
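+	/* CC (fuse) and GC_USER (driver) disables share the same field
+	 * layout, so one mask/shift pair covers the OR of both registers */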
+ data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
+ data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
+
+ mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
+ adev->gfx.config.max_sh_per_se);
+
+ return (~data) & mask;
+}
+
+static void gfx_v9_4_3_setup_rb(struct amdgpu_device *adev, int xcc_id)
+{
+ int i, j;
+ u32 data;
+ u32 active_rbs = 0;
+ u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
+ adev->gfx.config.max_sh_per_se;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
+ data = gfx_v9_4_3_get_rb_active_bitmap(adev);
+ active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
+ rb_bitmap_width_per_sh);
+ }
+ }
+ gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ adev->gfx.config.backend_enable_mask = active_rbs;
+ adev->gfx.config.num_rbs = hweight32(active_rbs);
+}
+
+#define DEFAULT_SH_MEM_BASES (0x6000)
+static void gfx_v9_4_3_init_compute_vmid(struct amdgpu_device *adev, int xcc_id)
+{
+ int i;
+ uint32_t sh_mem_config;
+ uint32_t sh_mem_bases;
+
+ /*
+ * Configure apertures:
+ * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
+ * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
+ * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
+ */
+ sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
+
+ sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
+
+ mutex_lock(&adev->srbm_mutex);
+ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
+ soc15_grbm_select(adev, 0, 0, 0, i, xcc_id);
+ /* CP and shaders */
+ WREG32_SOC15_RLC(GC, xcc_id, regSH_MEM_CONFIG, sh_mem_config);
+ WREG32_SOC15_RLC(GC, xcc_id, regSH_MEM_BASES, sh_mem_bases);
+ }
+ soc15_grbm_select(adev, 0, 0, 0, 0, xcc_id);
+ mutex_unlock(&adev->srbm_mutex);
+
+ /* Initialize all compute VMIDs to have no GDS, GWS, or OA
+ * access. These should be enabled by FW for target VMIDs. */
+ for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
+ WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_VMID0_BASE, 2 * i, 0);
+ WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_VMID0_SIZE, 2 * i, 0);
+ WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_GWS_VMID0, i, 0);
+ WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_OA_VMID0, i, 0);
+ }
+}
+
+static void gfx_v9_4_3_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
+{
+ int vmid;
+
+ /*
+ * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
+ * access. Compute VMIDs should be enabled by FW for target VMIDs,
+ * the driver can enable them for graphics. VMID0 should maintain
+ * access so that HWS firmware can save/restore entries.
+ */
+ for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
+ WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_VMID0_BASE, 2 * vmid, 0);
+ WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_VMID0_SIZE, 2 * vmid, 0);
+ WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_GWS_VMID0, vmid, 0);
+ WREG32_SOC15_OFFSET(GC, xcc_id, regGDS_OA_VMID0, vmid, 0);
+ }
+}
+
+static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
+{
+ u32 tmp;
+ int i, j;
+
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ WREG32_FIELD15_PREREG(GC, i, GRBM_CNTL, READ_TIMEOUT, 0xff);
+ gfx_v9_4_3_setup_rb(adev, i);
+ }
+
+ gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
+ adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, regDB_DEBUG2);
+
+ /* XXX SH_MEM regs */
+ /* where to put LDS, scratch, GPUVM in FSA64 space */
+ mutex_lock(&adev->srbm_mutex);
+ for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
+ for (j = 0; j < adev->gfx.num_xcd; j++) {
+ soc15_grbm_select(adev, 0, 0, 0, i, j);
+ /* CP and shaders */
+ if (i == 0) {
+ tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+ tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
+ !!adev->gmc.noretry);
+ WREG32_SOC15_RLC(GC, j, regSH_MEM_CONFIG, tmp);
+ WREG32_SOC15_RLC(GC, j, regSH_MEM_BASES, 0);
+ } else {
+ tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+ tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
+ !!adev->gmc.noretry);
+ WREG32_SOC15_RLC(GC, j, regSH_MEM_CONFIG, tmp);
+ tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
+ (adev->gmc.private_aperture_start >> 48));
+ tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
+ (adev->gmc.shared_aperture_start >> 48));
+ WREG32_SOC15_RLC(GC, j, regSH_MEM_BASES, tmp);
+ }
+ }
+ }
+ soc15_grbm_select(adev, 0, 0, 0, 0, 0);
+
+ mutex_unlock(&adev->srbm_mutex);
+
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ gfx_v9_4_3_init_compute_vmid(adev, i);
+ gfx_v9_4_3_init_gds_vmid(adev, i);
+ }
+}
+
+static void gfx_v9_4_3_enable_save_restore_machine(struct amdgpu_device *adev,
+ int xcc_id)
+{
+ WREG32_FIELD15_PREREG(GC, xcc_id, RLC_SRM_CNTL, SRM_ENABLE, 1);
+}
+
+static void gfx_v9_4_3_init_csb(struct amdgpu_device *adev, int xcc_id)
+{
+ adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
+ /* csib */
+ WREG32_RLC(SOC15_REG_OFFSET(GC, xcc_id, regRLC_CSIB_ADDR_HI),
+ adev->gfx.rlc.clear_state_gpu_addr >> 32);
+ WREG32_RLC(SOC15_REG_OFFSET(GC, xcc_id, regRLC_CSIB_ADDR_LO),
+ adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
+ WREG32_RLC(SOC15_REG_OFFSET(GC, xcc_id, regRLC_CSIB_LENGTH),
+ adev->gfx.rlc.clear_state_size);
+}
+
+static void gfx_v9_4_3_init_pg(struct amdgpu_device *adev, int xcc_id)
+{
+ gfx_v9_4_3_init_csb(adev, xcc_id);
+
+ /*
+ * The RLC save/restore list is usable since v2_1,
+ * and it's needed by the gfxoff feature.
+ */
+ if (adev->gfx.rlc.is_rlc_v2_1)
+ gfx_v9_4_3_enable_save_restore_machine(adev, xcc_id);
+
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GDS |
+ AMD_PG_SUPPORT_RLC_SMU_HS)) {
+ WREG32_SOC15(GC, 0, regRLC_JUMP_TABLE_RESTORE,
+ adev->gfx.rlc.cp_table_gpu_addr >> 8);
+ }
+}
+
+static void gfx_v9_4_3_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
+{
+ uint32_t data;
+
+ data = RREG32_SOC15(GC, xcc_id, regCPC_PSP_DEBUG);
+ data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
+ WREG32_SOC15(GC, xcc_id, regCPC_PSP_DEBUG, data);
}
static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
@@ -162,14 +1096,14 @@ static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
return true;
}
-static void gfx_v9_4_3_set_safe_mode(struct amdgpu_device *adev)
+static void gfx_v9_4_3_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
unsigned i;
data = RLC_SAFE_MODE__CMD_MASK;
data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
- WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);
+ WREG32_SOC15(GC, xcc_id, regRLC_SAFE_MODE, data);
/* wait for RLC_SAFE_MODE */
for (i = 0; i < adev->usec_timeout; i++) {
@@ -179,16 +1113,30 @@ static void gfx_v9_4_3_set_safe_mode(struct amdgpu_device *adev)
}
}
-static void gfx_v9_4_3_unset_safe_mode(struct amdgpu_device *adev)
+static void gfx_v9_4_3_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
uint32_t data;
data = RLC_SAFE_MODE__CMD_MASK;
- WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);
+ WREG32_SOC15(GC, xcc_id, regRLC_SAFE_MODE, data);
}
static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
{
+ const struct cs_section_def *cs_data;
+ int r;
+
+ adev->gfx.rlc.cs_data = gfx9_cs_data;
+
+ cs_data = adev->gfx.rlc.cs_data;
+
+ if (cs_data) {
+ /* init clear state block */
+ r = amdgpu_gfx_rlc_init_csb(adev);
+ if (r)
+ return r;
+ }
+
/* init spm vmid with 0xf */
if (adev->gfx.rlc.funcs->update_spm_vmid)
adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
@@ -196,7 +1144,8 @@ static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
return 0;
}
-static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev)
+static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev,
+ int xcc_id)
{
u32 i, j, k;
u32 mask;
@@ -204,7 +1153,7 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff);
+ gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
for (k = 0; k < adev->usec_timeout; k++) {
if (RREG32_SOC15(GC, 0, regRLC_SERDES_CU_MASTER_BUSY) == 0)
break;
@@ -212,7 +1161,8 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
if (k == adev->usec_timeout) {
gfx_v9_4_3_select_se_sh(adev, 0xffffffff,
- 0xffffffff, 0xffffffff);
+ 0xffffffff, 0xffffffff,
+ xcc_id);
mutex_unlock(&adev->grbm_idx_mutex);
DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
i, j);
@@ -220,7 +1170,7 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
}
}
- gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, xcc_id);
mutex_unlock(&adev->grbm_idx_mutex);
mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -235,36 +1185,42 @@ static void gfx_v9_4_3_wait_for_rlc_serdes(struct amdgpu_device *adev)
}
static void gfx_v9_4_3_enable_gui_idle_interrupt(struct amdgpu_device *adev,
- bool enable)
+ bool enable, int xcc_id)
{
u32 tmp;
/* These interrupts should be enabled to drive DS clock */
- tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);
+ tmp = RREG32_SOC15(GC, xcc_id, regCP_INT_CNTL_RING0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
- if (adev->gfx.num_gfx_rings)
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
- WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
+ WREG32_SOC15(GC, xcc_id, regCP_INT_CNTL_RING0, tmp);
}
static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
{
- WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
- gfx_v9_4_3_enable_gui_idle_interrupt(adev, false);
- gfx_v9_4_3_wait_for_rlc_serdes(adev);
+ int i;
+
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ WREG32_FIELD15_PREREG(GC, i, RLC_CNTL, RLC_ENABLE_F32, 0);
+ gfx_v9_4_3_enable_gui_idle_interrupt(adev, false, i);
+ gfx_v9_4_3_wait_for_rlc_serdes(adev, i);
+ }
}
static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
{
- WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
- udelay(50);
- WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
- udelay(50);
+ int i;
+
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ WREG32_FIELD15_PREREG(GC, i, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
+ udelay(50);
+ WREG32_FIELD15_PREREG(GC, i, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
+ udelay(50);
+ }
}
static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
@@ -272,35 +1228,38 @@ static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
#ifdef AMDGPU_RLC_DEBUG_RETRY
u32 rlc_ucode_ver;
#endif
+ int i;
- WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
- udelay(50);
-
- /* carrizo do enable cp interrupt after cp inited */
- if (!(adev->flags & AMD_IS_APU)) {
- gfx_v9_4_3_enable_gui_idle_interrupt(adev, true);
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ WREG32_FIELD15_PREREG(GC, i, RLC_CNTL, RLC_ENABLE_F32, 1);
udelay(50);
- }
+
+ /* carrizo do enable cp interrupt after cp inited */
+ if (!(adev->flags & AMD_IS_APU)) {
+ gfx_v9_4_3_enable_gui_idle_interrupt(adev, true, i);
+ udelay(50);
+ }
#ifdef AMDGPU_RLC_DEBUG_RETRY
- /* RLC_GPM_GENERAL_6 : RLC Ucode version */
- rlc_ucode_ver = RREG32_SOC15(GC, 0, regRLC_GPM_GENERAL_6);
- if (rlc_ucode_ver == 0x108) {
- dev_info(adev->dev,
- "Using rlc debug ucode. regRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n",
- rlc_ucode_ver, adev->gfx.rlc_fw_version);
- /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
- * default is 0x9C4 to create a 100us interval */
- WREG32_SOC15(GC, 0, regRLC_GPM_TIMER_INT_3, 0x9C4);
- /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
- * to disable the page fault retry interrupts, default is
- * 0x100 (256) */
- WREG32_SOC15(GC, 0, regRLC_GPM_GENERAL_12, 0x100);
- }
+ /* RLC_GPM_GENERAL_6 : RLC Ucode version */
+ rlc_ucode_ver = RREG32_SOC15(GC, i, regRLC_GPM_GENERAL_6);
+ if (rlc_ucode_ver == 0x108) {
+ dev_info(adev->dev,
+ "Using rlc debug ucode. regRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n",
+ rlc_ucode_ver, adev->gfx.rlc_fw_version);
+ /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
+ * default is 0x9C4 to create a 100us interval */
+ WREG32_SOC15(GC, i, regRLC_GPM_TIMER_INT_3, 0x9C4);
+ /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
+ * to disable the page fault retry interrupts, default is
+ * 0x100 (256) */
+ WREG32_SOC15(GC, i, regRLC_GPM_GENERAL_12, 0x100);
+ }
#endif
+ }
}
-static int gfx_v9_4_3_rlc_load_microcode(struct amdgpu_device *adev)
+static int gfx_v9_4_3_rlc_load_microcode(struct amdgpu_device *adev, int xcc_id)
{
const struct rlc_firmware_header_v2_0 *hdr;
const __le32 *fw_data;
@@ -316,37 +1275,38 @@ static int gfx_v9_4_3_rlc_load_microcode(struct amdgpu_device *adev)
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
+ WREG32_SOC15(GC, xcc_id, regRLC_GPM_UCODE_ADDR,
RLCG_UCODE_LOADING_START_ADDRESS);
for (i = 0; i < fw_size; i++) {
if (amdgpu_emu_mode == 1 && i % 100 == 0) {
dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
msleep(1);
}
- WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32_SOC15(GC, xcc_id, regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
}
- WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
+ WREG32_SOC15(GC, xcc_id, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
return 0;
}
static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
{
- int r;
+ int r, i;
adev->gfx.rlc.funcs->stop(adev);
- /* disable CG */
- WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ /* disable CG */
+ WREG32_SOC15(GC, i, regRLC_CGCG_CGLS_CTRL, 0);
- /* TODO: revisit pg function */
- /* gfx_v9_4_3_init_pg(adev);*/
+ gfx_v9_4_3_init_pg(adev, i);
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- /* legacy rlc firmware loading */
- r = gfx_v9_4_3_rlc_load_microcode(adev);
- if (r)
- return r;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ /* legacy rlc firmware loading */
+ r = gfx_v9_4_3_rlc_load_microcode(adev, i);
+ if (r)
+ return r;
+ }
}
adev->gfx.rlc.funcs->start(adev);
@@ -354,7 +1314,8 @@ static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
return 0;
}
-static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev,
+ unsigned vmid)
{
u32 reg, data;
@@ -407,20 +1368,916 @@ static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offs
ARRAY_SIZE(rlcg_access_gc_9_4_3));
}
-const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
- .get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
- .select_se_sh = &gfx_v9_4_3_select_se_sh,
- .read_wave_data = &gfx_v9_4_3_read_wave_data,
- .read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
- .read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
- .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
-};
+static void gfx_v9_4_3_cp_compute_enable(struct amdgpu_device *adev,
+ bool enable, int xcc_id)
+{
+ if (enable) {
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_MEC_CNTL, 0);
+ } else {
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_MEC_CNTL,
+ (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
+ adev->gfx.kiq[xcc_id].ring.sched.ready = false;
+ }
+ udelay(50);
+}
+
+static int gfx_v9_4_3_cp_compute_load_microcode(struct amdgpu_device *adev,
+ int xcc_id)
+{
+ const struct gfx_firmware_header_v1_0 *mec_hdr;
+ const __le32 *fw_data;
+ unsigned i;
+ u32 tmp;
+ u32 mec_ucode_addr_offset;
+ u32 mec_ucode_data_offset;
+
+ if (!adev->gfx.mec_fw)
+ return -EINVAL;
+
+ gfx_v9_4_3_cp_compute_enable(adev, false, xcc_id);
+
+ mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
+
+ fw_data = (const __le32 *)
+ (adev->gfx.mec_fw->data +
+ le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
+ tmp = 0;
+ tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
+ tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
+ WREG32_SOC15(GC, xcc_id, regCP_CPC_IC_BASE_CNTL, tmp);
+
+ WREG32_SOC15(GC, xcc_id, regCP_CPC_IC_BASE_LO,
+ adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
+ WREG32_SOC15(GC, xcc_id, regCP_CPC_IC_BASE_HI,
+ upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
+
+ mec_ucode_addr_offset =
+ SOC15_REG_OFFSET(GC, xcc_id, regCP_MEC_ME1_UCODE_ADDR);
+ mec_ucode_data_offset =
+ SOC15_REG_OFFSET(GC, xcc_id, regCP_MEC_ME1_UCODE_DATA);
+
+ /* MEC1 */
+ WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset);
+ for (i = 0; i < mec_hdr->jt_size; i++)
+ WREG32(mec_ucode_data_offset,
+ le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
+
+ WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version);
+ /* TODO: loading MEC2 firmware is only necessary if MEC2 is to run different microcode than MEC1. */
+
+ return 0;
+}
+
+/* KIQ functions */
+static void gfx_v9_4_3_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
+{
+ uint32_t tmp;
+ struct amdgpu_device *adev = ring->adev;
+
+ /* tell RLC which is KIQ queue */
+ tmp = RREG32_SOC15(GC, xcc_id, regRLC_CP_SCHEDULERS);
+ tmp &= 0xffffff00;
+ tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
+ WREG32_SOC15_RLC(GC, xcc_id, regRLC_CP_SCHEDULERS, tmp);
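+	/* the second write additionally sets bit 7; this mirrors gfx_v9_0,
+	 * where the bit appears to mark the scheduler entry as active */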
+ tmp |= 0x80;
+ WREG32_SOC15_RLC(GC, xcc_id, regRLC_CP_SCHEDULERS, tmp);
+}
+
+static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
+ mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ mqd->cp_hqd_queue_priority =
+ AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+ }
+ }
+}
+
+static int gfx_v9_4_3_mqd_init(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct v9_mqd *mqd = ring->mqd_ptr;
+ uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
+ uint32_t tmp;
+
+ mqd->header = 0xC0310800;
+ mqd->compute_pipelinestat_enable = 0x00000001;
+ mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
+ mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
+ mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
+ mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
+ mqd->compute_misc_reserved = 0x00000003;
+
+ mqd->dynamic_cu_mask_addr_lo =
+ lower_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
+ mqd->dynamic_cu_mask_addr_hi =
+ upper_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
+
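+	/* the EOP base address registers take a 256-byte-aligned address,
+	 * hence the >> 8 */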
+ eop_base_addr = ring->eop_gpu_addr >> 8;
+ mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
+ mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
+
+ /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+ tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
+ (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
+
+ mqd->cp_hqd_eop_control = tmp;
+
+ /* enable doorbell? */
+ tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
+
+ if (ring->use_doorbell) {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_OFFSET, ring->doorbell_index);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_EN, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_SOURCE, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_HIT, 0);
+ } else {
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_EN, 0);
+ }
+
+ mqd->cp_hqd_pq_doorbell_control = tmp;
+
+ /* disable the queue if it's active */
+ ring->wptr = 0;
+ mqd->cp_hqd_dequeue_request = 0;
+ mqd->cp_hqd_pq_rptr = 0;
+ mqd->cp_hqd_pq_wptr_lo = 0;
+ mqd->cp_hqd_pq_wptr_hi = 0;
+
+ /* set the pointer to the MQD */
+ mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
+ mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
+
+ /* set MQD vmid to 0 */
+ tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
+ mqd->cp_mqd_control = tmp;
+
+ /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
+ hqd_gpu_addr = ring->gpu_addr >> 8;
+ mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
+ mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
+
+ /* set up the HQD, this is similar to CP_RB0_CNTL */
+ tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
+ (order_base_2(ring->ring_size / 4) - 1));
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
+ ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
+#ifdef __BIG_ENDIAN
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
+#endif
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
+ mqd->cp_hqd_pq_control = tmp;
+
+ /* set the wb address whether it's enabled or not */
+ wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+ mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
+ mqd->cp_hqd_pq_rptr_report_addr_hi =
+ upper_32_bits(wb_gpu_addr) & 0xffff;
+
+ /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
+ wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
+ mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
+
+ /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+ ring->wptr = 0;
+ mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
+
+ /* set the vmid for the queue */
+ mqd->cp_hqd_vmid = 0;
+
+ tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
+ mqd->cp_hqd_persistent_state = tmp;
+
+ /* set MIN_IB_AVAIL_SIZE */
+ tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
+ tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
+ mqd->cp_hqd_ib_control = tmp;
+
+ /* set static priority for a queue/ring */
+ gfx_v9_4_3_mqd_set_priority(ring, mqd);
+ mqd->cp_hqd_quantum = RREG32(regCP_HQD_QUANTUM);
+
+ /* the map_queues packet doesn't need to activate the queue,
+ * so only the KIQ needs to set this field.
+ */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ mqd->cp_hqd_active = 1;
+
+ return 0;
+}
+
+static int gfx_v9_4_3_kiq_init_register(struct amdgpu_ring *ring, int xcc_id)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct v9_mqd *mqd = ring->mqd_ptr;
+ int j;
+
+ /* disable wptr polling */
+ WREG32_FIELD15_PREREG(GC, xcc_id, CP_PQ_WPTR_POLL_CNTL, EN, 0);
+
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_EOP_BASE_ADDR,
+ mqd->cp_hqd_eop_base_addr_lo);
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_EOP_BASE_ADDR_HI,
+ mqd->cp_hqd_eop_base_addr_hi);
+
+ /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_EOP_CONTROL,
+ mqd->cp_hqd_eop_control);
+
+ /* enable doorbell? */
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_DOORBELL_CONTROL,
+ mqd->cp_hqd_pq_doorbell_control);
+
+ /* disable the queue if it's active */
+ if (RREG32_SOC15(GC, xcc_id, regCP_HQD_ACTIVE) & 1) {
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_DEQUEUE_REQUEST, 1);
+ for (j = 0; j < adev->usec_timeout; j++) {
+ if (!(RREG32_SOC15(GC, xcc_id, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_DEQUEUE_REQUEST,
+ mqd->cp_hqd_dequeue_request);
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_RPTR,
+ mqd->cp_hqd_pq_rptr);
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_LO,
+ mqd->cp_hqd_pq_wptr_lo);
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_HI,
+ mqd->cp_hqd_pq_wptr_hi);
+ }
-const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
+ /* set the pointer to the MQD */
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_MQD_BASE_ADDR,
+ mqd->cp_mqd_base_addr_lo);
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_MQD_BASE_ADDR_HI,
+ mqd->cp_mqd_base_addr_hi);
+
+ /* set MQD vmid to 0 */
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_MQD_CONTROL,
+ mqd->cp_mqd_control);
+
+ /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_BASE,
+ mqd->cp_hqd_pq_base_lo);
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_BASE_HI,
+ mqd->cp_hqd_pq_base_hi);
+
+ /* set up the HQD, this is similar to CP_RB0_CNTL */
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_CONTROL,
+ mqd->cp_hqd_pq_control);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_RPTR_REPORT_ADDR,
+ mqd->cp_hqd_pq_rptr_report_addr_lo);
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
+ mqd->cp_hqd_pq_rptr_report_addr_hi);
+
+ /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_POLL_ADDR,
+ mqd->cp_hqd_pq_wptr_poll_addr_lo);
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
+ mqd->cp_hqd_pq_wptr_poll_addr_hi);
+
+ /* enable the doorbell if requested */
+ if (ring->use_doorbell) {
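+		/* the range registers appear to take byte offsets: the slot
+		 * index is doubled into dwords, then shifted left 2 into
+		 * bytes, matching the gfx_v9_0 arithmetic */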
+ WREG32_SOC15(GC, xcc_id, regCP_MEC_DOORBELL_RANGE_LOWER,
+ (adev->doorbell_index.kiq * 2) << 2);
+ WREG32_SOC15(GC, xcc_id, regCP_MEC_DOORBELL_RANGE_UPPER,
+ (adev->doorbell_index.userqueue_end * 2) << 2);
+ }
+
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_DOORBELL_CONTROL,
+ mqd->cp_hqd_pq_doorbell_control);
+
+ /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_LO,
+ mqd->cp_hqd_pq_wptr_lo);
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_HI,
+ mqd->cp_hqd_pq_wptr_hi);
+
+ /* set the vmid for the queue */
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_VMID, mqd->cp_hqd_vmid);
+
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PERSISTENT_STATE,
+ mqd->cp_hqd_persistent_state);
+
+ /* activate the queue */
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_ACTIVE,
+ mqd->cp_hqd_active);
+
+ if (ring->use_doorbell)
+ WREG32_FIELD15_PREREG(GC, xcc_id, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
+
+ return 0;
+}
+
+static int gfx_v9_4_3_kiq_fini_register(struct amdgpu_ring *ring, int xcc_id)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int j;
+
+ /* disable the queue if it's active */
+ if (RREG32_SOC15(GC, xcc_id, regCP_HQD_ACTIVE) & 1) {
+
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_DEQUEUE_REQUEST, 1);
+
+ for (j = 0; j < adev->usec_timeout; j++) {
+ if (!(RREG32_SOC15(GC, xcc_id, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+
+ if (j == adev->usec_timeout) {
+ DRM_DEBUG("KIQ dequeue request failed.\n");
+
+ /* Manual disable if dequeue request times out */
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_ACTIVE, 0);
+ }
+
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_DEQUEUE_REQUEST,
+ 0);
+ }
+
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_IQ_TIMER, 0);
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_IB_CONTROL, 0);
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PERSISTENT_STATE, 0);
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_DOORBELL_CONTROL, 0);
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_RPTR, 0);
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_HI, 0);
+ WREG32_SOC15_RLC(GC, xcc_id, regCP_HQD_PQ_WPTR_LO, 0);
+
+ return 0;
+}
+
+static int gfx_v9_4_3_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct v9_mqd *mqd = ring->mqd_ptr;
+ struct v9_mqd *tmp_mqd;
+
+ gfx_v9_4_3_kiq_setting(ring, xcc_id);
+
+ /* The GPU could be in a bad state during probe: the driver triggers
+ * the reset after loading the SMU, and in that case the MQD has not
+ * been initialized, so the driver needs to re-init it.
+ * Check mqd->cp_hqd_pq_control, since that value should not be 0.
+ */
+ tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup;
+ if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
+ /* for GPU_RESET case , reset MQD to a clean status */
+ if (adev->gfx.kiq[xcc_id].mqd_backup)
+ memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation));
+
+ /* reset ring buffer */
+ ring->wptr = 0;
+ amdgpu_ring_clear_ring(ring);
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, xcc_id);
+ gfx_v9_4_3_kiq_init_register(ring, xcc_id);
+ soc15_grbm_select(adev, 0, 0, 0, 0, xcc_id);
+ mutex_unlock(&adev->srbm_mutex);
+ } else {
+ memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+ ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, xcc_id);
+ gfx_v9_4_3_mqd_init(ring);
+ gfx_v9_4_3_kiq_init_register(ring, xcc_id);
+ soc15_grbm_select(adev, 0, 0, 0, 0, xcc_id);
+ mutex_unlock(&adev->srbm_mutex);
+
+ if (adev->gfx.kiq[xcc_id].mqd_backup)
+ memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
+ }
+
+ return 0;
+}
+
+static int gfx_v9_4_3_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct v9_mqd *mqd = ring->mqd_ptr;
+ int mqd_idx = ring - &adev->gfx.compute_ring[0];
+ struct v9_mqd *tmp_mqd;
+
+ /* Same as the KIQ init above: the driver needs to re-init the MQD
+ * if mqd->cp_hqd_pq_control was never initialized.
+ */
+ tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
+
+ if (!tmp_mqd->cp_hqd_pq_control ||
+ (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
+ memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+ ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, xcc_id);
+ gfx_v9_4_3_mqd_init(ring);
+ soc15_grbm_select(adev, 0, 0, 0, 0, xcc_id);
+ mutex_unlock(&adev->srbm_mutex);
+
+ if (adev->gfx.mec.mqd_backup[mqd_idx])
+ memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
+ } else {
+ /* restore MQD to a clean status */
+ if (adev->gfx.mec.mqd_backup[mqd_idx])
+ memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
+ /* reset ring buffer */
+ ring->wptr = 0;
+ atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
+ amdgpu_ring_clear_ring(ring);
+ }
+
+ return 0;
+}
+
+static int gfx_v9_4_3_kiq_resume(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_ring *ring;
+ int r;
+
+ ring = &adev->gfx.kiq[xcc_id].ring;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0))
+ return r;
+
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (unlikely(r != 0)) {
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ return r;
+ }
+
+ gfx_v9_4_3_kiq_init_queue(ring, xcc_id);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ ring->sched.ready = true;
+ return 0;
+}
+
+static int gfx_v9_4_3_kcq_resume(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_ring *ring = NULL;
+ int r = 0, i;
+
+ gfx_v9_4_3_cp_compute_enable(adev, true, xcc_id);
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0))
+ goto done;
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v9_4_3_kcq_init_queue(ring, xcc_id);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r)
+ goto done;
+ }
+
+ r = amdgpu_gfx_enable_kcq(adev, xcc_id);
+done:
+ return r;
+}
+
+static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
+{
+ int r, i, j;
+ struct amdgpu_ring *ring;
+
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ gfx_v9_4_3_enable_gui_idle_interrupt(adev, false, i);
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ gfx_v9_4_3_disable_gpa_mode(adev, i);
+
+ r = gfx_v9_4_3_cp_compute_load_microcode(adev, i);
+ if (r)
+ return r;
+ }
+
+ r = gfx_v9_4_3_kiq_resume(adev, i);
+ if (r)
+ return r;
+
+ r = gfx_v9_4_3_kcq_resume(adev, i);
+ if (r)
+ return r;
+
+ /* skip ring test on slave kcq */
+ if (amdgpu_gfx_is_master_xcc(adev, i)) {
+ for (j = 0; j < adev->gfx.num_compute_rings; j++) {
+ ring = &adev->gfx.compute_ring[j +
+ i * adev->gfx.num_compute_rings];
+ amdgpu_ring_test_helper(ring);
+ }
+ }
+
+ gfx_v9_4_3_enable_gui_idle_interrupt(adev, true, i);
+ }
+
+ return 0;
+}
+
+static void gfx_v9_4_3_cp_enable(struct amdgpu_device *adev, bool enable,
+ int xcc_id)
+{
+ gfx_v9_4_3_cp_compute_enable(adev, enable, xcc_id);
+}
+
+static int gfx_v9_4_3_hw_init(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ gfx_v9_4_3_init_golden_registers(adev);
+
+ gfx_v9_4_3_constants_init(adev);
+
+ r = adev->gfx.rlc.funcs->resume(adev);
+ if (r)
+ return r;
+
+ r = gfx_v9_4_3_cp_resume(adev);
+ if (r)
+ return r;
+
+ return r;
+}
+
+static int gfx_v9_4_3_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+ amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+ amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ if (amdgpu_gfx_disable_kcq(adev, i))
+ DRM_ERROR("XCD %d KCQ disable failed\n", i);
+
+ /* Use the deinitialize sequence from CAIL when unbinding the device
+ * from the driver, otherwise the KIQ hangs when binding back
+ */
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, adev->gfx.kiq[i].ring.me,
+ adev->gfx.kiq[i].ring.pipe,
+ adev->gfx.kiq[i].ring.queue, 0, i);
+ gfx_v9_4_3_kiq_fini_register(&adev->gfx.kiq[i].ring, i);
+ soc15_grbm_select(adev, 0, 0, 0, 0, i);
+ mutex_unlock(&adev->srbm_mutex);
+ }
+
+ gfx_v9_4_3_cp_enable(adev, false, i);
+ }
+
+ /* Skip suspend with A+A reset */
+ if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
+ dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
+ return 0;
+ }
+
+ adev->gfx.rlc.funcs->stop(adev);
+ return 0;
+}
+
+static int gfx_v9_4_3_suspend(void *handle)
+{
+ return gfx_v9_4_3_hw_fini(handle);
+}
+
+static int gfx_v9_4_3_resume(void *handle)
+{
+ return gfx_v9_4_3_hw_init(handle);
+}
+
+static bool gfx_v9_4_3_is_idle(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ if (REG_GET_FIELD(RREG32_SOC15(GC, i, regGRBM_STATUS),
+ GRBM_STATUS, GUI_ACTIVE))
+ return false;
+ }
+ return true;
+}
+
+static int gfx_v9_4_3_wait_for_idle(void *handle)
+{
+ unsigned i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (gfx_v9_4_3_is_idle(handle))
+ return 0;
+ udelay(1);
+ }
+ return -ETIMEDOUT;
+}
+
+static int gfx_v9_4_3_soft_reset(void *handle)
+{
+ u32 grbm_soft_reset = 0;
+ u32 tmp;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* GRBM_STATUS */
+ tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS);
+ if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
+ GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
+ GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
+ GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
+ GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
+ GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
+ GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
+ GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
+ }
+
+ if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
+ GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
+ }
+
+ /* GRBM_STATUS2 */
+ tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS2);
+ if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
+ grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
+ GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
+
+
+ if (grbm_soft_reset) {
+ /* stop the rlc */
+ adev->gfx.rlc.funcs->stop(adev);
+
+ /* Disable MEC parsing/prefetching */
+ gfx_v9_4_3_cp_compute_enable(adev, false, 0);
+
+ tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+ }
+ return 0;
+}
+
+static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
+ uint32_t vmid,
+ uint32_t gds_base, uint32_t gds_size,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ /* GDS Base */
+ gfx_v9_4_3_write_data_to_reg(ring, 0, false,
+ SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid,
+ gds_base);
+
+ /* GDS Size */
+ gfx_v9_4_3_write_data_to_reg(ring, 0, false,
+ SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid,
+ gds_size);
+
+ /* GWS */
+ gfx_v9_4_3_write_data_to_reg(ring, 0, false,
+ SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid,
+ gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
+
+ /* OA */
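+	/* (1 << (oa_size + oa_base)) - (1 << oa_base) builds a mask of
+	 * oa_size consecutive bits starting at bit oa_base */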
+ gfx_v9_4_3_write_data_to_reg(ring, 0, false,
+ SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid,
+ (1 << (oa_size + oa_base)) - (1 << oa_base));
+}
+
+static int gfx_v9_4_3_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ /* hardcoded during the emulation phase */
+ adev->gfx.num_xcd = 1;
+ adev->gfx.num_xcc_per_xcp = 1;
+ adev->gfx.partition_mode = AMDGPU_SPX_PARTITION_MODE;
+
+ adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+ AMDGPU_MAX_COMPUTE_RINGS);
+ gfx_v9_4_3_set_kiq_pm4_funcs(adev);
+ gfx_v9_4_3_set_ring_funcs(adev);
+ gfx_v9_4_3_set_irq_funcs(adev);
+ gfx_v9_4_3_set_gds_init(adev);
+ gfx_v9_4_3_set_rlc_funcs(adev);
+
+ return gfx_v9_4_3_init_microcode(adev);
+}
+
+static int gfx_v9_4_3_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static void gfx_v9_4_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable, int xcc_id)
+{
+ uint32_t data, def;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
+
+ /* It is disabled by HW by default */
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+ /* 1 - RLC_CGTT_MGCG_OVERRIDE */
+ def = data = RREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE);
+
+ data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
+
+ /* only for Vega10 & Raven1 */
+ data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE, data);
+
+ /* MGLS is a global flag to control all MGLS in GFX */
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+ /* 2 - RLC memory Light sleep */
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
+ def = data = RREG32_SOC15(GC, xcc_id, regRLC_MEM_SLP_CNTL);
+ data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
+ if (def != data)
+ WREG32_SOC15(GC, xcc_id, regRLC_MEM_SLP_CNTL, data);
+ }
+ /* 3 - CP memory Light sleep */
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
+ def = data = RREG32_SOC15(GC, xcc_id, regCP_MEM_SLP_CNTL);
+ data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
+ if (def != data)
+ WREG32_SOC15(GC, xcc_id, regCP_MEM_SLP_CNTL, data);
+ }
+ }
+ } else {
+ /* 1 - MGCG_OVERRIDE */
+ def = data = RREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE);
+
+ data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
+
+ if (def != data)
+ WREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE, data);
+
+ /* 2 - disable MGLS in RLC */
+ data = RREG32_SOC15(GC, xcc_id, regRLC_MEM_SLP_CNTL);
+ if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
+ data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
+ WREG32_SOC15(GC, xcc_id, regRLC_MEM_SLP_CNTL, data);
+ }
+
+ /* 3 - disable MGLS in CP */
+ data = RREG32_SOC15(GC, xcc_id, regCP_MEM_SLP_CNTL);
+ if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
+ data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
+ WREG32_SOC15(GC, xcc_id, regCP_MEM_SLP_CNTL, data);
+ }
+ }
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
+}
+
+static void gfx_v9_4_3_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable, int xcc_id)
+{
+ uint32_t def, data;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
+ def = data = RREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE);
+ /* unset CGCG override */
+ data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
+ data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
+ else
+ data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
+ /* update CGCG and CGLS override bits */
+ if (def != data)
+ WREG32_SOC15(GC, xcc_id, regRLC_CGTT_MGCG_OVERRIDE, data);
+
+ /* enable cgcg FSM(0x0000363F) */
+ def = RREG32_SOC15(GC, xcc_id, regRLC_CGCG_CGLS_CTRL);
+
+ if (adev->asic_type == CHIP_ARCTURUS)
+ data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+ RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
+ else
+ data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+ RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
+ data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
+ RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
+ if (def != data)
+ WREG32_SOC15(GC, xcc_id, regRLC_CGCG_CGLS_CTRL, data);
+
+ /* set IDLE_POLL_COUNT(0x00900100) */
+ def = RREG32_SOC15(GC, xcc_id, regCP_RB_WPTR_POLL_CNTL);
+ data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
+ (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
+ if (def != data)
+ WREG32_SOC15(GC, xcc_id, regCP_RB_WPTR_POLL_CNTL, data);
+ } else {
+ def = data = RREG32_SOC15(GC, xcc_id, regRLC_CGCG_CGLS_CTRL);
+ /* reset CGCG/CGLS bits */
+ data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+ /* disable cgcg and cgls in FSM */
+ if (def != data)
+ WREG32_SOC15(GC, xcc_id, regRLC_CGCG_CGLS_CTRL, data);
+ }
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
+}
+
+static int gfx_v9_4_3_update_gfx_clock_gating(struct amdgpu_device *adev,
+ bool enable, int xcc_id)
+{
+ if (enable) {
+ /* CGCG/CGLS should be enabled after MGCG/MGLS
+ * === MGCG + MGLS ===
+ */
+ gfx_v9_4_3_update_medium_grain_clock_gating(adev, enable, xcc_id);
+ /* === CGCG + CGLS === */
+ gfx_v9_4_3_update_coarse_grain_clock_gating(adev, enable, xcc_id);
+ } else {
+ /* CGCG/CGLS should be disabled before MGCG/MGLS
+ * === CGCG + CGLS ===
+ */
+ gfx_v9_4_3_update_coarse_grain_clock_gating(adev, enable, xcc_id);
+ /* === MGCG + MGLS === */
+ gfx_v9_4_3_update_medium_grain_clock_gating(adev, enable, xcc_id);
+ }
+ return 0;
+}
+
+static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
.is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled,
.set_safe_mode = gfx_v9_4_3_set_safe_mode,
.unset_safe_mode = gfx_v9_4_3_unset_safe_mode,
.init = gfx_v9_4_3_rlc_init,
+ .get_csb_size = gfx_v9_4_3_get_csb_size,
+ .get_csb_buffer = gfx_v9_4_3_get_csb_buffer,
.resume = gfx_v9_4_3_rlc_resume,
.stop = gfx_v9_4_3_rlc_stop,
.reset = gfx_v9_4_3_rlc_reset,
@@ -428,3 +2285,844 @@ const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
.update_spm_vmid = gfx_v9_4_3_update_spm_vmid,
.is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
};
+
+static int gfx_v9_4_3_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+static int gfx_v9_4_3_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 4, 3):
+ for (i = 0; i < adev->gfx.num_xcd; i++)
+ gfx_v9_4_3_update_gfx_clock_gating(adev,
+ state == AMD_CG_STATE_GATE, i);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int data;
+
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+ /* AMD_CG_SUPPORT_GFX_MGCG */
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, regRLC_CGTT_MGCG_OVERRIDE));
+ if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
+ *flags |= AMD_CG_SUPPORT_GFX_MGCG;
+
+ /* AMD_CG_SUPPORT_GFX_CGCG */
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, regRLC_CGCG_CGLS_CTRL));
+ if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_GFX_CGCG;
+
+ /* AMD_CG_SUPPORT_GFX_CGLS */
+ if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_GFX_CGLS;
+
+ /* AMD_CG_SUPPORT_GFX_RLC_LS */
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, regRLC_MEM_SLP_CNTL));
+ if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
+
+ /* AMD_CG_SUPPORT_GFX_CP_LS */
+ data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, regCP_MEM_SLP_CNTL));
+ if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
+}
+
+static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 ref_and_mask, reg_mem_engine;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ switch (ring->me) {
+ case 1:
+ ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
+ break;
+ case 2:
+ ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
+ break;
+ default:
+ return;
+ }
+ reg_mem_engine = 0;
+ } else {
+ ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
+ reg_mem_engine = 1; /* pfp */
+ }
+
+ gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
+ adev->nbio.funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio.funcs->get_hdp_flush_done_offset(adev),
+ ref_and_mask, ref_and_mask, 0x20);
+}
+
+static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
+{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+
+ /* Currently, there is a high possibility to get wave ID mismatch
+ * between ME and GDS, leading to a hw deadlock, because ME generates
+ * different wave IDs than the GDS expects. This situation happens
+ * randomly when at least 5 compute pipes use GDS ordered append.
+ * The wave IDs generated by ME are also wrong after suspend/resume.
+ * Those are probably bugs somewhere else in the kernel driver.
+ *
+ * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
+ * GDS to 0 for this ring (me/pipe).
+ */
+ if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
+ amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
+ }
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
+ amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, control);
+}
+
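
The control dword built by the emitter above packs the IB size and VMID together with a validity bit. A standalone sketch of the packing follows; the INDIRECT_BUFFER_VALID value here is an assumed stand-in for the real definition in the driver's packet headers, and the length/vmid values are arbitrary examples.

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed stand-in; the real INDIRECT_BUFFER_VALID lives in soc15d.h. */
	#define INDIRECT_BUFFER_VALID_SKETCH (1u << 23)

	int main(void)
	{
		uint32_t length_dw = 16;	/* IB size in dwords (example) */
		uint32_t vmid = 3;		/* VM context of the job (example) */
		uint32_t control = INDIRECT_BUFFER_VALID_SKETCH |
				   length_dw | (vmid << 24);

		printf("control = 0x%08x\n", control);	/* 0x03800010 */
		return 0;
	}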
+static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+ u64 seq, unsigned flags)
+{
+ bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
+ bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
+ bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
+
+ /* RELEASE_MEM - flush caches, send int */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
+ amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
+ EOP_TC_NC_ACTION_EN) :
+ (EOP_TCL1_ACTION_EN |
+ EOP_TC_ACTION_EN |
+ EOP_TC_WB_ACTION_EN |
+ EOP_TC_MD_ACTION_EN)) |
+ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+ EVENT_INDEX(5)));
+ amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
+
+ /*
+	 * The address must be Qword-aligned for a 64-bit write and
+	 * Dword-aligned when only the low 32 bits are written (the
+	 * high 32 bits are discarded).
+ */
+ if (write64bit)
+ BUG_ON(addr & 0x7);
+ else
+ BUG_ON(addr & 0x3);
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, lower_32_bits(seq));
+ amdgpu_ring_write(ring, upper_32_bits(seq));
+ amdgpu_ring_write(ring, 0);
+}
+
+static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0,
+ lower_32_bits(addr), upper_32_bits(addr),
+ seq, 0xffffffff, 4);
+}
+
+static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vmid, uint64_t pd_addr)
+{
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+}
+
+static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring)
+{
+ return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
+}
+
+static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring)
+{
+ u64 wptr;
+
+ /* XXX check if swapping is necessary on BE */
+ if (ring->use_doorbell)
+ wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
+ else
+ BUG();
+ return wptr;
+}
+
+static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ /* XXX check if swapping is necessary on BE */
+ if (ring->use_doorbell) {
+ atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
+ WDOORBELL64(ring->doorbell_index, ring->wptr);
+ } else {
+ BUG(); /* only DOORBELL method supported on gfx9 now */
+ }
+}
+
+static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
+ u64 seq, unsigned int flags)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ /* we only allocate 32bit for each seq wb address */
+ BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
+
+ /* write fence seq to the "addr" */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, lower_32_bits(seq));
+
+ if (flags & AMDGPU_FENCE_FLAG_INT) {
+ /* set register to trigger INT */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
+ amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
+ }
+}
+
+static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t reg_val_offs)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
+	amdgpu_ring_write(ring, 0 |	/* src: register */
+ (5 << 8) | /* dst: memory */
+ (1 << 20)); /* write confirm */
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
+ reg_val_offs * 4));
+ amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
+ reg_val_offs * 4));
+}
+
+static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val)
+{
+ uint32_t cmd = 0;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
+ break;
+ case AMDGPU_RING_TYPE_KIQ:
+ cmd = (1 << 16); /* no inc addr */
+ break;
+ default:
+ cmd = WR_CONFIRM;
+ break;
+ }
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ amdgpu_ring_write(ring, cmd);
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, val);
+}
+
+static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
+}
+
+static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask)
+{
+ amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
+ ref, mask);
+}
+
+static void gfx_v9_4_3_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
+ int me, int pipe,
+ enum amdgpu_interrupt_state state,
+ int xcc_id)
+{
+ u32 mec_int_cntl, mec_int_cntl_reg;
+
+ /*
+ * amdgpu controls only the first MEC. That's why this function only
+ * handles the setting of interrupts for this specific MEC. All other
+ * pipes' interrupts are set by amdkfd.
+ */
+
+ if (me == 1) {
+ switch (pipe) {
+ case 0:
+ mec_int_cntl_reg = SOC15_REG_OFFSET(GC, xcc_id, regCP_ME1_PIPE0_INT_CNTL);
+ break;
+ case 1:
+ mec_int_cntl_reg = SOC15_REG_OFFSET(GC, xcc_id, regCP_ME1_PIPE1_INT_CNTL);
+ break;
+ case 2:
+ mec_int_cntl_reg = SOC15_REG_OFFSET(GC, xcc_id, regCP_ME1_PIPE2_INT_CNTL);
+ break;
+ case 3:
+ mec_int_cntl_reg = SOC15_REG_OFFSET(GC, xcc_id, regCP_ME1_PIPE3_INT_CNTL);
+ break;
+ default:
+ DRM_DEBUG("invalid pipe %d\n", pipe);
+ return;
+ }
+ } else {
+ DRM_DEBUG("invalid me %d\n", me);
+ return;
+ }
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ mec_int_cntl = RREG32(mec_int_cntl_reg);
+ mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ TIME_STAMP_INT_ENABLE, 0);
+ WREG32(mec_int_cntl_reg, mec_int_cntl);
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ mec_int_cntl = RREG32(mec_int_cntl_reg);
+ mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
+ TIME_STAMP_INT_ENABLE, 1);
+ WREG32(mec_int_cntl_reg, mec_int_cntl);
+ break;
+ default:
+ break;
+ }
+}
+
+static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ int i;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < adev->gfx.num_xcd; i++)
+ WREG32_FIELD15_PREREG(GC, i, CP_INT_CNTL_RING0,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ int i;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < adev->gfx.num_xcd; i++)
+ WREG32_FIELD15_PREREG(GC, i, CP_INT_CNTL_RING0,
+ PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ int i;
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ switch (type) {
+ case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
+ gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 0, state, i);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
+ gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 1, state, i);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
+ gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 2, state, i);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
+ gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 1, 3, state, i);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
+ gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 0, state, i);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
+ gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 1, state, i);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
+ gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 2, state, i);
+ break;
+ case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
+ gfx_v9_4_3_set_compute_eop_interrupt_state(adev, 2, 3, state, i);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ int i;
+ u8 me_id, pipe_id, queue_id;
+ struct amdgpu_ring *ring;
+
+ DRM_DEBUG("IH: CP EOP\n");
+ me_id = (entry->ring_id & 0x0c) >> 2;
+ pipe_id = (entry->ring_id & 0x03) >> 0;
+ queue_id = (entry->ring_id & 0x70) >> 4;
+
+ switch (me_id) {
+ case 0:
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ /* Per-queue interrupt is supported for MEC starting from VI.
+ * The interrupt can only be enabled/disabled per pipe instead of per queue.
+ */
+ if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
+ amdgpu_fence_process(ring);
+ }
+ break;
+ }
+ return 0;
+}
+
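
The EOP handler above splits the interrupt vector's ring_id into ME/pipe/queue bitfields. A standalone worked example of the decode (the ring_id value is chosen arbitrarily for illustration):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t ring_id  = 0x25;		/* example IV ring_id */
		uint8_t me_id    = (ring_id & 0x0c) >> 2;	/* -> 1 */
		uint8_t pipe_id  = (ring_id & 0x03) >> 0;	/* -> 1 */
		uint8_t queue_id = (ring_id & 0x70) >> 4;	/* -> 2 */

		printf("me %u pipe %u queue %u\n", me_id, pipe_id, queue_id);
		return 0;
	}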
+static void gfx_v9_4_3_fault(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ u8 me_id, pipe_id, queue_id;
+ struct amdgpu_ring *ring;
+ int i;
+
+ me_id = (entry->ring_id & 0x0c) >> 2;
+ pipe_id = (entry->ring_id & 0x03) >> 0;
+ queue_id = (entry->ring_id & 0x70) >> 4;
+
+ switch (me_id) {
+ case 0:
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ }
+}
+
+static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal register access in command stream\n");
+ gfx_v9_4_3_fault(adev, entry);
+ return 0;
+}
+
+static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_ERROR("Illegal instruction in command stream\n");
+ gfx_v9_4_3_fault(adev, entry);
+ return 0;
+}
+
+static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ const unsigned int cp_coher_cntl =
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
+
+	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+ amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+ amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+}
+
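
As a reference point, the all-ones CP_COHER_SIZE/SIZE_HI pair plus a zero CP_COHER_BASE describes a single full-coverage range, so the acquire applies to every surface. A minimal sketch of how the two size fields combine (the byte granularity of the field is a hardware detail not asserted here):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t coher_size    = 0xffffffff;	/* CP_COHER_SIZE */
		uint32_t coher_size_hi = 0xffffff;	/* CP_COHER_SIZE_HI */
		uint64_t range = ((uint64_t)coher_size_hi << 32) | coher_size;

		/* With CP_COHER_BASE = 0, an all-ones range field means
		 * the flush/invalidate covers the whole address space. */
		printf("range field = 0x%014llx\n", (unsigned long long)range);
		return 0;
	}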
+static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring,
+ uint32_t pipe, bool enable)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t val;
+ uint32_t wcl_cs_reg;
+
+	/* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are the same */
+ val = enable ? 0x1 : 0x7f;
+
+ switch (pipe) {
+ case 0:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, regSPI_WCL_PIPE_PERCENT_CS0);
+ break;
+ case 1:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, regSPI_WCL_PIPE_PERCENT_CS1);
+ break;
+ case 2:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, regSPI_WCL_PIPE_PERCENT_CS2);
+ break;
+ case 3:
+ wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, regSPI_WCL_PIPE_PERCENT_CS3);
+ break;
+ default:
+ DRM_DEBUG("invalid pipe %d\n", pipe);
+ return;
+ }
+
+ amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
+}
+
+static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t val;
+ int i;
+
+	/* regSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to
+	 * limit the number of gfx waves. Setting the low 5 bits ensures gfx
+	 * only gets around 25% of the gpu resources.
+	 */
+ val = enable ? 0x1f : 0x07ffffff;
+ amdgpu_ring_emit_wreg(ring,
+ SOC15_REG_OFFSET(GC, 0, regSPI_WCL_PIPE_PERCENT_GFX),
+ val);
+
+	/* Restrict waves for normal/low priority compute queues as well
+	 * to get the best QoS for high priority compute jobs.
+	 *
+	 * amdgpu controls only the first ME (CS pipes 0-3).
+ */
+ for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
+ if (i != ring->pipe)
+ gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable);
+	}
+}
+
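
A worked check of the ~25% figure in the comment above, assuming the percent register scales linearly against its 7-bit maximum (an assumption for illustration, not something the patch states):

	#include <stdio.h>

	int main(void)
	{
		double full    = 0x7f;	/* 7-bit multiplier: full throughput */
		double limited = 0x1f;	/* value written when the limit is on */

		/* 100 * 31 / 127 ~= 24.4% */
		printf("gfx share ~= %.1f%%\n", 100.0 * limited / full);
		return 0;
	}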
+static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
+ .name = "gfx_v9_4_3",
+ .early_init = gfx_v9_4_3_early_init,
+ .late_init = gfx_v9_4_3_late_init,
+ .sw_init = gfx_v9_4_3_sw_init,
+ .sw_fini = gfx_v9_4_3_sw_fini,
+ .hw_init = gfx_v9_4_3_hw_init,
+ .hw_fini = gfx_v9_4_3_hw_fini,
+ .suspend = gfx_v9_4_3_suspend,
+ .resume = gfx_v9_4_3_resume,
+ .is_idle = gfx_v9_4_3_is_idle,
+ .wait_for_idle = gfx_v9_4_3_wait_for_idle,
+ .soft_reset = gfx_v9_4_3_soft_reset,
+ .set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
+ .set_powergating_state = gfx_v9_4_3_set_powergating_state,
+ .get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
+};
+
+static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
+ .type = AMDGPU_RING_TYPE_COMPUTE,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
+ .support_64bit_ptrs = true,
+ .get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
+ .get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
+ .set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
+ .emit_frame_size =
+ 20 + /* gfx_v9_4_3_ring_emit_gds_switch */
+ 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
+ 5 + /* hdp invalidate */
+ 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+ 2 + /* gfx_v9_4_3_ring_emit_vm_flush */
+ 8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
+ 7 + /* gfx_v9_4_3_emit_mem_sync */
+ 5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
+ 15, /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
+ .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */
+ .emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
+ .emit_fence = gfx_v9_4_3_ring_emit_fence,
+ .emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync,
+ .emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush,
+ .emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch,
+ .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
+ .test_ring = gfx_v9_4_3_ring_test_ring,
+ .test_ib = gfx_v9_4_3_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .emit_wreg = gfx_v9_4_3_ring_emit_wreg,
+ .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
+ .emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
+ .emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
+};
+
+static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
+ .type = AMDGPU_RING_TYPE_KIQ,
+ .align_mask = 0xff,
+ .nop = PACKET3(PACKET3_NOP, 0x3FFF),
+ .support_64bit_ptrs = true,
+ .get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
+ .get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
+ .set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
+ .emit_frame_size =
+ 20 + /* gfx_v9_4_3_ring_emit_gds_switch */
+ 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
+ 5 + /* hdp invalidate */
+ 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+ 2 + /* gfx_v9_4_3_ring_emit_vm_flush */
+ 8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */
+ .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */
+ .emit_fence = gfx_v9_4_3_ring_emit_fence_kiq,
+ .test_ring = gfx_v9_4_3_ring_test_ring,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .emit_rreg = gfx_v9_4_3_ring_emit_rreg,
+ .emit_wreg = gfx_v9_4_3_ring_emit_wreg,
+ .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
+};
+
+static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq;
+
+ for (j = 0; j < adev->gfx.num_compute_rings; j++)
+ adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs
+ = &gfx_v9_4_3_ring_funcs_compute;
+ }
+}
+
+static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = {
+ .set = gfx_v9_4_3_set_eop_interrupt_state,
+ .process = gfx_v9_4_3_eop_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
+ .set = gfx_v9_4_3_set_priv_reg_fault_state,
+ .process = gfx_v9_4_3_priv_reg_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
+ .set = gfx_v9_4_3_set_priv_inst_fault_state,
+ .process = gfx_v9_4_3_priv_inst_irq,
+};
+
+static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
+ adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs;
+
+ adev->gfx.priv_reg_irq.num_types = 1;
+ adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;
+
+ adev->gfx.priv_inst_irq.num_types = 1;
+ adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
+}
+
+static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
+{
+ adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs;
+}
+
+static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
+{
+	/* init asic gds info */
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 4, 3):
+		/* 9.4.3 removed all the GDS internal memory; only GWS
+		 * opcodes such as barrier and semaphore are supported
+		 * in kernel. */
+ adev->gds.gds_size = 0;
+ break;
+ default:
+ adev->gds.gds_size = 0x10000;
+ break;
+ }
+
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 4, 3):
+ /* deprecated for 9.4.3, no usage at all */
+ adev->gds.gds_compute_max_wave_id = 0;
+ break;
+ default:
+ /* this really depends on the chip */
+ adev->gds.gds_compute_max_wave_id = 0x7ff;
+ break;
+ }
+
+ adev->gds.gws_size = 64;
+ adev->gds.oa_size = 16;
+}
+
+static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+ u32 bitmap)
+{
+ u32 data;
+
+ if (!bitmap)
+ return;
+
+ data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+ data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+
+ WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
+}
+
+static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev)
+{
+ u32 data, mask;
+
+ data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
+ data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);
+
+ data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+ data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+
+ mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
+
+ return (~data) & mask;
+}
+
+static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
+ struct amdgpu_cu_info *cu_info)
+{
+ int i, j, k, counter, active_cu_number = 0;
+ u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
+ unsigned disable_masks[4 * 4];
+
+ if (!adev || !cu_info)
+ return -EINVAL;
+
+ /*
+	 * 16 comes from the 4*4 bitmap array size, which covers all gfx9 ASICs
+ */
+ if (adev->gfx.config.max_shader_engines *
+ adev->gfx.config.max_sh_per_se > 16)
+ return -EINVAL;
+
+ amdgpu_gfx_parse_disable_cu(disable_masks,
+ adev->gfx.config.max_shader_engines,
+ adev->gfx.config.max_sh_per_se);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ mask = 1;
+ ao_bitmap = 0;
+ counter = 0;
+ gfx_v9_4_3_select_se_sh(adev, i, j, 0xffffffff, 0);
+ gfx_v9_4_3_set_user_cu_inactive_bitmap(
+ adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
+ bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev);
+
+ /*
+			 * The bitmap (and ao_cu_bitmap) in the cu_info
+			 * structure is a 4x4 array, which suits Vega
+			 * ASICs with their 4*2 SE/SH layout.
+			 * Arcturus, however, changed the SE/SH layout
+			 * to 8*1. To minimize the impact, we keep it
+			 * compatible with the current bitmap array as below:
+ * SE4,SH0 --> bitmap[0][1]
+ * SE5,SH0 --> bitmap[1][1]
+ * SE6,SH0 --> bitmap[2][1]
+ * SE7,SH0 --> bitmap[3][1]
+ */
+ cu_info->bitmap[i % 4][j + i / 4] = bitmap;
+
+ for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
+ if (bitmap & mask) {
+ if (counter < adev->gfx.config.max_cu_per_sh)
+ ao_bitmap |= mask;
+ counter++;
+ }
+ mask <<= 1;
+ }
+ active_cu_number += counter;
+ if (i < 2 && j < 2)
+ ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
+ cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
+ }
+ }
+ gfx_v9_4_3_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ cu_info->number = active_cu_number;
+ cu_info->ao_cu_mask = ao_cu_mask;
+ cu_info->simd_per_cu = NUM_SIMD_PER_CU;
+
+ return 0;
+}
+
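
The SE/SH remapping described in the comment above can be checked directly: for an 8*1 layout, the index arithmetic folds SE 4-7 into the second column of the 4x4 array. A standalone sketch of the mapping:

	#include <stdio.h>

	int main(void)
	{
		int i, j = 0;	/* 8 SEs, 1 SH each */

		for (i = 0; i < 8; i++)
			printf("SE%d,SH0 -> bitmap[%d][%d]\n", i, i % 4, j + i / 4);
		/* SE4..SE7 land in bitmap[0..3][1], matching the comment. */
		return 0;
	}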
+const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 9,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &gfx_v9_4_3_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h
index 84e69701b81a..4b530f4c1295 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h
@@ -24,7 +24,6 @@
#ifndef __GFX_V9_4_3_H__
#define __GFX_V9_4_3_H__
-extern const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs;
-extern const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs;
+extern const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block;
#endif /* __GFX_V9_4_3_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
index c59c6c85fbff..79af32bb078c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c
@@ -43,19 +43,25 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev,
uint64_t page_table_base)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ int i;
- WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- hub->ctx_addr_distance * vmid,
- lower_32_bits(page_table_base));
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ WREG32_SOC15_OFFSET(GC, i,
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ hub->ctx_addr_distance * vmid,
+ lower_32_bits(page_table_base));
- WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- hub->ctx_addr_distance * vmid,
- upper_32_bits(page_table_base));
+ WREG32_SOC15_OFFSET(GC, i,
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ hub->ctx_addr_distance * vmid,
+ upper_32_bits(page_table_base));
+ }
}
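
The gfxhub hunks that follow repeat one mechanical transformation: every register write that previously targeted GC instance 0 now loops over all XCC instances, since each XCC carries its own copy of the hub registers. A standalone sketch of the shape of the change (gc_write stands in for WREG32_SOC15, and the instance count and register values are examples):

	#include <stdio.h>

	#define NUM_XCD 4	/* example XCC count; the driver reads this at init */

	/* Stand-in for WREG32_SOC15(GC, inst, reg, val). */
	static void gc_write(int inst, unsigned int reg, unsigned int val)
	{
		printf("XCC%d: reg %#x <- %#x\n", inst, reg, val);
	}

	int main(void)
	{
		int i;

		/* Before this patch the write went to instance 0 only;
		 * now every XCC instance gets its own copy. */
		for (i = 0; i < NUM_XCD; i++)
			gc_write(i, 0x1234, 0x1);
		return 0;
	}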
static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev)
{
uint64_t pt_base;
+ int i;
if (adev->gmc.pdb0_bo)
pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
@@ -67,26 +73,36 @@ static void gfxhub_v1_2_init_gart_aperture_regs(struct amdgpu_device *adev)
	/* If GART is used for FB translation, the vmid0 page table covers
	 * both vram and system memory (gart)
*/
- if (adev->gmc.pdb0_bo) {
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.fb_start >> 12));
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.fb_start >> 44));
-
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
- (u32)(adev->gmc.gart_end >> 12));
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
- (u32)(adev->gmc.gart_end >> 44));
- } else {
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.gart_start >> 12));
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.gart_start >> 44));
-
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
- (u32)(adev->gmc.gart_end >> 12));
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
- (u32)(adev->gmc.gart_end >> 44));
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ if (adev->gmc.pdb0_bo) {
+ WREG32_SOC15(GC, i,
+ regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.fb_start >> 12));
+ WREG32_SOC15(GC, i,
+ regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.fb_start >> 44));
+
+ WREG32_SOC15(GC, i,
+ regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(GC, i,
+ regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+ } else {
+ WREG32_SOC15(GC, i,
+ regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.gart_start >> 12));
+ WREG32_SOC15(GC, i,
+ regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.gart_start >> 44));
+
+ WREG32_SOC15(GC, i,
+ regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(GC, i,
+ regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+ }
}
}
@@ -94,160 +110,183 @@ static void gfxhub_v1_2_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
uint32_t tmp;
+ int i;
- /* Program the AGP BAR */
- WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_BASE, 0);
- WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
- WREG32_SOC15_RLC(GC, 0, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
-
- if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
- /* Program the system aperture low logical page number. */
- WREG32_SOC15_RLC(GC, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
-
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- /*
- * Raven2 has a HW issue that it is unable to use the
- * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
- * So here is the workaround that increase system
- * aperture high address (add 1) to get rid of the VM
- * fault and hardware hang.
- */
- WREG32_SOC15_RLC(GC, 0,
- regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max((adev->gmc.fb_end >> 18) + 0x1,
- adev->gmc.agp_end >> 18));
- else
- WREG32_SOC15_RLC(GC, 0,
- regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
-
- /* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
- WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
- (u32)(value >> 12));
- WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
- (u32)(value >> 44));
-
- /* Program "protection fault". */
- WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
- (u32)(adev->dummy_page_addr >> 12));
- WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
- (u32)((u64)adev->dummy_page_addr >> 44));
-
- tmp = RREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL2);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
- ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
- WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
- }
-
- /* In the case squeezing vram into GART aperture, we don't use
- * FB aperture and AGP aperture. Disable them.
- */
- if (adev->gmc.pdb0_bo) {
- WREG32_SOC15(GC, 0, regMC_VM_FB_LOCATION_TOP, 0);
- WREG32_SOC15(GC, 0, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
- WREG32_SOC15(GC, 0, regMC_VM_AGP_TOP, 0);
- WREG32_SOC15(GC, 0, regMC_VM_AGP_BOT, 0xFFFFFF);
- WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
- WREG32_SOC15(GC, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ /* Program the AGP BAR */
+ WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_BASE, 0);
+ WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15_RLC(GC, i, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
+ if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15_RLC(GC, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ /*
+			 * Raven2 has a HW issue that prevents it from using
+			 * vram above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. The
+			 * workaround is to raise the system aperture high
+			 * address (add 1) to get rid of the VM fault and
+			 * hardware hang.
+ */
+ WREG32_SOC15_RLC(GC, i,
+ regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max((adev->gmc.fb_end >> 18) + 0x1,
+ adev->gmc.agp_end >> 18));
+ else
+ WREG32_SOC15_RLC(GC, i,
+ regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+
+ /* Set default page address. */
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
+ WREG32_SOC15(GC, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(GC, i, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+
+ /* Program "protection fault". */
+ WREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ (u32)((u64)adev->dummy_page_addr >> 44));
+
+ tmp = RREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_CNTL2);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
+ ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+ WREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
+ }
+
+		/* When squeezing vram into the GART aperture, the FB and
+		 * AGP apertures are unused. Disable them.
+		 */
+ if (adev->gmc.pdb0_bo) {
+ WREG32_SOC15(GC, i, regMC_VM_FB_LOCATION_TOP, 0);
+ WREG32_SOC15(GC, i, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
+ WREG32_SOC15(GC, i, regMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, i, regMC_VM_AGP_BOT, 0xFFFFFF);
+ WREG32_SOC15(GC, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
+ WREG32_SOC15(GC, i, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
+ }
}
}
static void gfxhub_v1_2_init_tlb_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
+ int i;
- /* Setup TLB control */
- tmp = RREG32_SOC15(GC, 0, regMC_VM_MX_L1_TLB_CNTL);
-
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
- ENABLE_ADVANCED_DRIVER_MODEL, 1);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
- SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
- MTYPE, MTYPE_UC);/* XXX for emulation. */
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
-
- WREG32_SOC15_RLC(GC, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(GC, i, regMC_VM_MX_L1_TLB_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_L1_TLB, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ MTYPE, MTYPE_UC);/* XXX for emulation. */
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
+
+ WREG32_SOC15_RLC(GC, i, regMC_VM_MX_L1_TLB_CNTL, tmp);
+ }
}
static void gfxhub_v1_2_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
+ int i;
- /* Setup L2 cache */
- tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
- /* XXX for emulation, Refer to closed source code.*/
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
- 0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
- WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL, tmp);
-
- tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL2);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
- WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL2, tmp);
-
- tmp = regVM_L2_CNTL3_DEFAULT;
- if (adev->gmc.translate_further) {
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
- L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
- } else {
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
- L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
- }
- WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL3, tmp);
-
- tmp = regVM_L2_CNTL4_DEFAULT;
- if (adev->gmc.xgmi.connected_to_cpu) {
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
- } else {
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(GC, i, regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
+		/* XXX for emulation, refer to closed source code. */
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
+ 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
+ WREG32_SOC15_RLC(GC, i, regVM_L2_CNTL, tmp);
+
+ tmp = RREG32_SOC15(GC, i, regVM_L2_CNTL2);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+ WREG32_SOC15_RLC(GC, i, regVM_L2_CNTL2, tmp);
+
+ tmp = regVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
+ WREG32_SOC15_RLC(GC, i, regVM_L2_CNTL3, tmp);
+
+ tmp = regVM_L2_CNTL4_DEFAULT;
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ }
+ WREG32_SOC15_RLC(GC, i, regVM_L2_CNTL4, tmp);
}
- WREG32_SOC15_RLC(GC, 0, regVM_L2_CNTL4, tmp);
}
static void gfxhub_v1_2_enable_system_domain(struct amdgpu_device *adev)
{
uint32_t tmp;
+ int i;
- tmp = RREG32_SOC15(GC, 0, regVM_CONTEXT0_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
- adev->gmc.vmid0_page_table_depth);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
- adev->gmc.vmid0_page_table_block_size);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
- WREG32_SOC15(GC, 0, regVM_CONTEXT0_CNTL, tmp);
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ tmp = RREG32_SOC15(GC, i, regVM_CONTEXT0_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
+ adev->gmc.vmid0_page_table_depth);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
+ adev->gmc.vmid0_page_table_block_size);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ WREG32_SOC15(GC, i, regVM_CONTEXT0_CNTL, tmp);
+ }
}
static void gfxhub_v1_2_disable_identity_aperture(struct amdgpu_device *adev)
{
- WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
- 0XFFFFFFFF);
- WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
- 0x0000000F);
-
- WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
- 0);
- WREG32_SOC15(GC, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
- 0);
-
- WREG32_SOC15(GC, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
- WREG32_SOC15(GC, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
+	int i;
+
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ WREG32_SOC15(GC, i,
+ regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
+ 0XFFFFFFFF);
+ WREG32_SOC15(GC, i,
+ regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
+ 0x0000000F);
+
+ WREG32_SOC15(GC, i,
+ regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
+ 0);
+ WREG32_SOC15(GC, i,
+ regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
+ 0);
+
+ WREG32_SOC15(GC, i,
+ regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
+ WREG32_SOC15(GC, i,
+ regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
+ }
}
static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev)
@@ -255,7 +294,7 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev)
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
unsigned num_level, block_size;
uint32_t tmp;
- int i;
+ int i, j;
num_level = adev->vm_manager.num_level;
block_size = adev->vm_manager.block_size;
@@ -264,81 +303,89 @@ static void gfxhub_v1_2_setup_vmid_config(struct amdgpu_device *adev)
else
block_size -= 9;
- for (i = 0; i <= 14; i++) {
- tmp = RREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT1_CNTL, i);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
- num_level);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
- 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PAGE_TABLE_BLOCK_SIZE,
- block_size);
- /* Send no-retry XNACK on fault to suppress VM fault storm.
- * On Aldebaran, XNACK can be enabled in the SQ per-process.
- * Retry faults need to be enabled for that to work.
- */
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !adev->gmc.noretry ||
- adev->asic_type == CHIP_ALDEBARAN);
- WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT1_CNTL,
- i * hub->ctx_distance, tmp);
- WREG32_SOC15_OFFSET(GC, 0,
- regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
- i * hub->ctx_addr_distance, 0);
- WREG32_SOC15_OFFSET(GC, 0,
- regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
- i * hub->ctx_addr_distance, 0);
- WREG32_SOC15_OFFSET(GC, 0,
- regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
- i * hub->ctx_addr_distance,
- lower_32_bits(adev->vm_manager.max_pfn - 1));
- WREG32_SOC15_OFFSET(GC, 0,
- regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
- i * hub->ctx_addr_distance,
- upper_32_bits(adev->vm_manager.max_pfn - 1));
+ for (j = 0; j < adev->gfx.num_xcd; j++) {
+ for (i = 0; i <= 14; i++) {
+ tmp = RREG32_SOC15_OFFSET(GC, j, regVM_CONTEXT1_CNTL, i);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
+ num_level);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PAGE_TABLE_BLOCK_SIZE,
+ block_size);
+ /* Send no-retry XNACK on fault to suppress VM fault storm.
+ * On Aldebaran, XNACK can be enabled in the SQ per-process.
+ * Retry faults need to be enabled for that to work.
+ */
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+ !adev->gmc.noretry ||
+ adev->asic_type == CHIP_ALDEBARAN);
+ WREG32_SOC15_OFFSET(GC, j, regVM_CONTEXT1_CNTL,
+ i * hub->ctx_distance, tmp);
+ WREG32_SOC15_OFFSET(GC, j,
+ regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(GC, j,
+ regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(GC, j,
+ regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+ i * hub->ctx_addr_distance,
+ lower_32_bits(adev->vm_manager.max_pfn - 1));
+ WREG32_SOC15_OFFSET(GC, j,
+ regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+ i * hub->ctx_addr_distance,
+ upper_32_bits(adev->vm_manager.max_pfn - 1));
+ }
}
}
static void gfxhub_v1_2_program_invalidation(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
- unsigned i;
-
- for (i = 0 ; i < 18; ++i) {
- WREG32_SOC15_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
- i * hub->eng_addr_distance, 0xffffffff);
- WREG32_SOC15_OFFSET(GC, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
- i * hub->eng_addr_distance, 0x1f);
+ unsigned i, j;
+
+ for (j = 0; j < adev->gfx.num_xcd; j++) {
+ for (i = 0 ; i < 18; ++i) {
+ WREG32_SOC15_OFFSET(GC, j, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
+ i * hub->eng_addr_distance, 0xffffffff);
+ WREG32_SOC15_OFFSET(GC, j, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
+ i * hub->eng_addr_distance, 0x1f);
+ }
}
}
static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev) && adev->asic_type != CHIP_ARCTURUS) {
+ int i;
+
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ if (amdgpu_sriov_vf(adev)) {
/*
			 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
			 * VF copy registers, so vbios post doesn't program them; for
			 * SRIOV the driver needs to program them
*/
- WREG32_SOC15_RLC(GC, 0, regMC_VM_FB_LOCATION_BASE,
- adev->gmc.vram_start >> 24);
- WREG32_SOC15_RLC(GC, 0, regMC_VM_FB_LOCATION_TOP,
- adev->gmc.vram_end >> 24);
+ WREG32_SOC15_RLC(GC, i, regMC_VM_FB_LOCATION_BASE,
+ adev->gmc.vram_start >> 24);
+ WREG32_SOC15_RLC(GC, i, regMC_VM_FB_LOCATION_TOP,
+ adev->gmc.vram_end >> 24);
+ }
}
/* GART Enable. */
@@ -361,27 +408,29 @@ static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev)
{
struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
u32 tmp;
- u32 i;
-
- /* Disable all tables */
- for (i = 0; i < 16; i++)
- WREG32_SOC15_OFFSET(GC, 0, regVM_CONTEXT0_CNTL,
- i * hub->ctx_distance, 0);
-
- /* Setup TLB control */
- tmp = RREG32_SOC15(GC, 0, regMC_VM_MX_L1_TLB_CNTL);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
- tmp = REG_SET_FIELD(tmp,
- MC_VM_MX_L1_TLB_CNTL,
- ENABLE_ADVANCED_DRIVER_MODEL,
- 0);
- WREG32_SOC15_RLC(GC, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
-
- /* Setup L2 cache */
- tmp = RREG32_SOC15(GC, 0, regVM_L2_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
- WREG32_SOC15(GC, 0, regVM_L2_CNTL, tmp);
- WREG32_SOC15(GC, 0, regVM_L2_CNTL3, 0);
+ u32 i, j;
+
+ for (j = 0; j < adev->gfx.num_xcd; j++) {
+ /* Disable all tables */
+ for (i = 0; i < 16; i++)
+ WREG32_SOC15_OFFSET(GC, j, regVM_CONTEXT0_CNTL,
+ i * hub->ctx_distance, 0);
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(GC, j, regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+ tmp = REG_SET_FIELD(tmp,
+ MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL,
+ 0);
+ WREG32_SOC15_RLC(GC, j, regMC_VM_MX_L1_TLB_CNTL, tmp);
+
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(GC, j, regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(GC, j, regVM_L2_CNTL, tmp);
+ WREG32_SOC15(GC, j, regVM_L2_CNTL3, 0);
+ }
}
/**
@@ -394,38 +443,42 @@ static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev,
bool value)
{
u32 tmp;
- tmp = RREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp,
- VM_L2_PROTECTION_FAULT_CNTL,
- TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
- value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
- if (!value) {
+ int i;
+
+ for (i = 0; i < adev->gfx.num_xcd; i++) {
+ tmp = RREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp,
+ VM_L2_PROTECTION_FAULT_CNTL,
+ TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- CRASH_ON_NO_RETRY_FAULT, 1);
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
- CRASH_ON_RETRY_FAULT, 1);
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
+ WREG32_SOC15(GC, i, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
- WREG32_SOC15(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
static void gfxhub_v1_2_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index b213dcf8ca06..5697b66bf0de 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -343,7 +343,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* For SRIOV run time, driver shouldn't access the register through MMIO
* Directly use kiq to do the vm invalidation instead
*/
- if (adev->gfx.kiq.ring.sched.ready && !adev->enable_mes &&
+ if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
down_read_trylock(&adev->reset_domain->sem)) {
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
@@ -428,11 +428,11 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t queried_pasid;
bool ret;
u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
if (amdgpu_emu_mode == 0 && ring->sched.ready) {
- spin_lock(&adev->gfx.kiq.ring_lock);
+ spin_lock(&adev->gfx.kiq[0].ring_lock);
/* 2 dwords flush + 8 dwords fence */
amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
kiq->pmf->kiq_invalidate_tlbs(ring,
@@ -440,12 +440,12 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r) {
amdgpu_ring_undo(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
return -ETIME;
}
amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index d95f9fe8f1c5..f73c238f3145 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -291,7 +291,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* For SRIOV run time, driver shouldn't access the register through MMIO
* Directly use kiq to do the vm invalidation instead
*/
- if ((adev->gfx.kiq.ring.sched.ready || adev->mes.ring.sched.ready) &&
+ if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
const unsigned eng = 17;
@@ -329,11 +329,11 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint32_t seq;
uint16_t queried_pasid;
bool ret;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
if (amdgpu_emu_mode == 0 && ring->sched.ready) {
- spin_lock(&adev->gfx.kiq.ring_lock);
+ spin_lock(&adev->gfx.kiq[0].ring_lock);
/* 2 dwords flush + 8 dwords fence */
amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
kiq->pmf->kiq_invalidate_tlbs(ring,
@@ -341,12 +341,12 @@ static int gmc_v11_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r) {
amdgpu_ring_undo(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
return -ETIME;
}
amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 2fe21cefd772..6ae5cee9b64b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -824,7 +824,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
/* This is necessary for a HW workaround under SRIOV as well
* as GFXOFF under bare metal
*/
- if (adev->gfx.kiq.ring.sched.ready &&
+ if (adev->gfx.kiq[0].ring.sched.ready &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
down_read_trylock(&adev->reset_domain->sem)) {
uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
@@ -934,8 +934,8 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t queried_pasid;
bool ret;
u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
- struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &adev->gfx.kiq[0].ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
if (amdgpu_in_reset(adev))
return -EIO;
@@ -955,7 +955,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
if (vega20_xgmi_wa)
ndw += kiq->pmf->invalidate_tlbs_size;
- spin_lock(&adev->gfx.kiq.ring_lock);
+ spin_lock(&adev->gfx.kiq[0].ring_lock);
/* 2 dwords flush + 8 dwords fence */
amdgpu_ring_alloc(ring, ndw);
if (vega20_xgmi_wa)
@@ -966,13 +966,13 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r) {
amdgpu_ring_undo(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
up_read(&adev->reset_domain->sem);
return -ETIME;
}
amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq.ring_lock);
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
if (r < 1) {
dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
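The gmc_v9/v10/v11 hunks above all apply the same mechanical refactor: the single adev->gfx.kiq member becomes a per-instance array, and every existing single-instance path simply indexes element 0. A minimal userspace model of that pattern, with an assumed instance count (an illustration, not the kernel's structures):

#include <stdio.h>

#define MAX_INST 8 /* assumed instance count, for the sketch only */

struct kiq { int ring_ready; };

struct gfx {
    struct kiq kiq[MAX_INST]; /* was: struct kiq kiq; */
};

int main(void)
{
    struct gfx gfx = { .kiq[0].ring_ready = 1 };

    /* legacy single-instance callers keep working via index 0 */
    printf("kiq[0] ready: %d\n", gfx.kiq[0].ring_ready);
    return 0;
}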
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 2e2062636d5f..4560476c7c31 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -797,8 +797,8 @@ static void mes_v10_1_queue_init_register(struct amdgpu_ring *ring)
static int mes_v10_1_kiq_enable_queue(struct amdgpu_device *adev)
{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
int r;
if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
@@ -863,9 +863,9 @@ static int mes_v10_1_kiq_ring_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- spin_lock_init(&adev->gfx.kiq.ring_lock);
+ spin_lock_init(&adev->gfx.kiq[0].ring_lock);
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
ring->me = 3;
ring->pipe = 1;
@@ -891,7 +891,7 @@ static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
if (pipe == AMDGPU_MES_KIQ_PIPE)
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
ring = &adev->mes.ring;
else
@@ -901,6 +901,7 @@ static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev,
return 0;
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
@@ -974,15 +975,15 @@ static int mes_v10_1_sw_fini(void *handle)
amdgpu_ucode_release(&adev->mes.fw[pipe]);
}
- amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
- &adev->gfx.kiq.ring.mqd_gpu_addr,
- &adev->gfx.kiq.ring.mqd_ptr);
+ amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
+ &adev->gfx.kiq[0].ring.mqd_gpu_addr,
+ &adev->gfx.kiq[0].ring.mqd_ptr);
amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
&adev->mes.ring.mqd_gpu_addr,
&adev->mes.ring.mqd_ptr);
- amdgpu_ring_fini(&adev->gfx.kiq.ring);
+ amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
amdgpu_ring_fini(&adev->mes.ring);
amdgpu_mes_fini(adev);
@@ -1038,7 +1039,7 @@ static int mes_v10_1_kiq_hw_init(struct amdgpu_device *adev)
mes_v10_1_enable(adev, true);
- mes_v10_1_kiq_setting(&adev->gfx.kiq.ring);
+ mes_v10_1_kiq_setting(&adev->gfx.kiq[0].ring);
r = mes_v10_1_queue_init(adev);
if (r)
@@ -1090,7 +1091,7 @@ static int mes_v10_1_hw_init(void *handle)
* MES uses KIQ ring exclusively so driver cannot access KIQ ring
* with MES enabled.
*/
- adev->gfx.kiq.ring.sched.ready = false;
+ adev->gfx.kiq[0].ring.sched.ready = false;
adev->mes.ring.sched.ready = true;
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 45280f047180..3adb450eec07 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -864,8 +864,8 @@ static void mes_v11_0_queue_init_register(struct amdgpu_ring *ring)
static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
{
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
int r;
if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
@@ -894,7 +894,7 @@ static int mes_v11_0_queue_init(struct amdgpu_device *adev,
int r;
if (pipe == AMDGPU_MES_KIQ_PIPE)
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
ring = &adev->mes.ring;
else
@@ -961,9 +961,9 @@ static int mes_v11_0_kiq_ring_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- spin_lock_init(&adev->gfx.kiq.ring_lock);
+ spin_lock_init(&adev->gfx.kiq[0].ring_lock);
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
ring->me = 3;
ring->pipe = 1;
@@ -989,7 +989,7 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
if (pipe == AMDGPU_MES_KIQ_PIPE)
- ring = &adev->gfx.kiq.ring;
+ ring = &adev->gfx.kiq[0].ring;
else if (pipe == AMDGPU_MES_SCHED_PIPE)
ring = &adev->mes.ring;
else
@@ -999,6 +999,7 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
return 0;
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
@@ -1074,15 +1075,15 @@ static int mes_v11_0_sw_fini(void *handle)
amdgpu_ucode_release(&adev->mes.fw[pipe]);
}
- amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
- &adev->gfx.kiq.ring.mqd_gpu_addr,
- &adev->gfx.kiq.ring.mqd_ptr);
+ amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
+ &adev->gfx.kiq[0].ring.mqd_gpu_addr,
+ &adev->gfx.kiq[0].ring.mqd_ptr);
amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
&adev->mes.ring.mqd_gpu_addr,
&adev->mes.ring.mqd_ptr);
- amdgpu_ring_fini(&adev->gfx.kiq.ring);
+ amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
amdgpu_ring_fini(&adev->mes.ring);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
@@ -1175,7 +1176,7 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
mes_v11_0_enable(adev, true);
- mes_v11_0_kiq_setting(&adev->gfx.kiq.ring);
+ mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);
r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
if (r)
@@ -1196,7 +1197,7 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
}
if (amdgpu_sriov_vf(adev)) {
- mes_v11_0_kiq_dequeue(&adev->gfx.kiq.ring);
+ mes_v11_0_kiq_dequeue(&adev->gfx.kiq[0].ring);
mes_v11_0_kiq_clear(adev);
}
@@ -1244,7 +1245,7 @@ static int mes_v11_0_hw_init(void *handle)
* MES uses KIQ ring exclusively so driver cannot access KIQ ring
* with MES enabled.
*/
- adev->gfx.kiq.ring.sched.ready = false;
+ adev->gfx.kiq[0].ring.sched.ready = false;
adev->mes.ring.sched.ready = true;
return 0;
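Both MES mqd_sw_init hunks widen the MQD buffer placement from GTT-only to VRAM-or-GTT by OR-ing domain flags into one bitmask; the allocator may then place the object in any domain whose bit is set. A toy model of the bitmask (flag values illustrative, not the kernel's):

#include <stdio.h>

#define DOMAIN_GTT  0x2u /* illustrative flag values */
#define DOMAIN_VRAM 0x4u

int main(void)
{
    unsigned int domains = DOMAIN_VRAM | DOMAIN_GTT;

    printf("VRAM allowed: %d, GTT allowed: %d\n",
           !!(domains & DOMAIN_VRAM), !!(domains & DOMAIN_GTT));
    return 0;
}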
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 98c826f1f89b..a5f76c9538c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -98,6 +98,16 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode =
};
/* Sienna Cichlid */
+static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = {
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
+};
+
+static const struct amdgpu_video_codecs sc_video_codecs_encode = {
+ .codec_count = ARRAY_SIZE(sc_video_codecs_encode_array),
+ .codec_array = sc_video_codecs_encode_array,
+};
+
static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
@@ -136,8 +146,8 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 =
/* SRIOV Sienna Cichlid, not const since data is controlled by host */
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
{
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
};
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] =
@@ -237,12 +247,12 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
} else {
if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
if (encode)
- *codecs = &nv_video_codecs_encode;
+ *codecs = &sc_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode_vcn1;
} else {
if (encode)
- *codecs = &nv_video_codecs_encode;
+ *codecs = &sc_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode_vcn0;
}
@@ -251,14 +261,14 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
case IP_VERSION(3, 0, 16):
case IP_VERSION(3, 0, 2):
if (encode)
- *codecs = &nv_video_codecs_encode;
+ *codecs = &sc_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode_vcn0;
return 0;
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 1, 2):
if (encode)
- *codecs = &nv_video_codecs_encode;
+ *codecs = &sc_video_codecs_encode;
else
*codecs = &yc_video_codecs_decode;
return 0;
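With this change Sienna Cichlid reports its own encode limits (H.264 up to 4096x2160, HEVC up to 7680x4352) instead of the generic nv table. A caps table of this kind is just an array plus a count, looked up by codec id; a self-contained sketch of that lookup (names and ids illustrative):

#include <stddef.h>
#include <stdio.h>

struct codec_info { int codec_id; int max_w, max_h; };

static const struct codec_info encode_caps[] = {
    { 1, 4096, 2160 }, /* H.264, matching the table above */
    { 2, 7680, 4352 }, /* HEVC */
};

static const struct codec_info *find_caps(int codec_id)
{
    for (size_t i = 0; i < sizeof(encode_caps) / sizeof(encode_caps[0]); i++)
        if (encode_caps[i].codec_id == codec_id)
            return &encode_caps[i];
    return NULL;
}

int main(void)
{
    const struct codec_info *c = find_caps(2);

    if (c)
        printf("HEVC encode up to %dx%d\n", c->max_w, c->max_h);
    return 0;
}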
@@ -371,12 +381,12 @@ static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
}
@@ -622,9 +632,9 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
bool enter)
{
if (enter)
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
else
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
if (adev->gfx.funcs->update_perfmon_mgcg)
adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
diff --git a/drivers/gpu/drm/amd/amdgpu/nvd.h b/drivers/gpu/drm/amd/amdgpu/nvd.h
index fd6b58243b03..631dafb92299 100644
--- a/drivers/gpu/drm/amd/amdgpu/nvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/nvd.h
@@ -462,6 +462,9 @@
# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
#define PACKET3_RUN_LIST 0xA5
#define PACKET3_MAP_PROCESS_VM 0xA6
-
+/* GFX11 */
+#define PACKET3_SET_Q_PREEMPTION_MODE 0xF0
+# define PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(x) ((x) << 0)
+# define PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM (1 << 0)
#endif
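The new PACKET3_SET_Q_PREEMPTION_MODE definitions follow the usual PM4 convention: each field macro shifts a value into position within a 32-bit packet dword. Which dword of the packet a field occupies is packet-format specific; the sketch below only demonstrates the shift-and-or composition style (macro names shortened):

#include <stdint.h>
#include <stdio.h>

/* field macros mirroring the header above */
#define Q_PREEMPTION_MODE_IB_VMID(x)      ((uint32_t)(x) << 0)
#define Q_PREEMPTION_MODE_INIT_SHADOW_MEM (1u << 0)

int main(void)
{
    /* each field lands in its own dword of the packet body */
    uint32_t vmid_dw = Q_PREEMPTION_MODE_IB_VMID(5);
    uint32_t ctrl_dw = Q_PREEMPTION_MODE_INIT_SHADOW_MEM;

    printf("vmid dword: 0x%08x, ctrl dword: 0x%08x\n", vmid_dw, ctrl_dw);
    return 0;
}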
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
index 6f9895cdddb1..0ddb6955a6d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
@@ -141,6 +141,10 @@ static const struct soc15_ras_field_entry sdma_v4_4_ras_fields[] = {
SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UTCL1_RDBST_FIFO_SED),
0, 0,
},
+ { "SDMA_UTCL1_WR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UTCL1_WR_FIFO_SED),
+ 0, 0,
+ },
{ "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_DATA_LUT_FIFO_SED),
0, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 7f99e130acd0..f64b87b11b1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1181,12 +1181,12 @@ static uint32_t si_get_register_value(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 6d15d5cd9e07..3221522e71e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -311,7 +311,7 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
void soc15_grbm_select(struct amdgpu_device *adev,
- u32 me, u32 pipe, u32 queue, u32 vmid)
+ u32 me, u32 pipe, u32 queue, u32 vmid, int xcc_id)
{
u32 grbm_gfx_cntl = 0;
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
@@ -319,7 +319,7 @@ void soc15_grbm_select(struct amdgpu_device *adev,
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
- WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
+ WREG32_SOC15_RLC_SHADOW(GC, xcc_id, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}
static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
@@ -363,12 +363,12 @@ static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_n
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index efc2a253e8db..2b41ee968dd1 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -100,7 +100,7 @@ struct soc15_ras_field_entry {
#define SOC15_RAS_REG_FIELD_VAL(val, entry, field) SOC15_REG_FIELD_VAL((val), (entry).field##_count_mask, (entry).field##_count_shift)
void soc15_grbm_select(struct amdgpu_device *adev,
- u32 me, u32 pipe, u32 queue, u32 vmid);
+ u32 me, u32 pipe, u32 queue, u32 vmid, int xcc_id);
void soc15_set_virt_ops(struct amdgpu_device *adev);
void soc15_program_register_sequence(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 744be2a05623..0f82b8e83acb 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -288,12 +288,12 @@ static uint32_t soc21_read_indexed_register(struct amdgpu_device *adev, u32 se_n
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
}
@@ -542,9 +542,9 @@ static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
bool enter)
{
if (enter)
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
else
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
if (adev->gfx.funcs->update_perfmon_mgcg)
adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 531f173ade2d..8e70581960fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -762,12 +762,12 @@ static uint32_t vi_get_register_value(struct amdgpu_device *adev,
mutex_lock(&adev->grbm_idx_mutex);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
val = RREG32(reg_offset);
if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
mutex_unlock(&adev->grbm_idx_mutex);
return val;
} else {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 96a138a39515..c02430537e9c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -735,7 +735,9 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
case KFD_IOCTL_SVM_ATTR_ACCESS:
case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
- *update_mapping = true;
+ if (!p->xnack_enabled)
+ *update_mapping = true;
+
gpuidx = kfd_process_gpuidx_from_gpuid(p,
attrs[i].value);
if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8b4b186c57f5..2bbb2988942d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1646,11 +1646,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
- /* Disable SubVP + DRR config by default */
- init_data.flags.disable_subvp_drr = true;
- if (amdgpu_dc_feature_mask & DC_ENABLE_SUBVP_DRR)
- init_data.flags.disable_subvp_drr = false;
-
init_data.flags.seamless_boot_edp_requested = false;
if (check_seamless_boot_capability(adev)) {
@@ -1776,12 +1771,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_init_callbacks(adev->dm.dc, &init_params);
}
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
- if (!adev->dm.secure_display_ctxs) {
- DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n");
- }
-#endif
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
init_completion(&adev->dm.dmub_aux_transfer_done);
adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
@@ -1840,6 +1829,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
+ if (!adev->dm.secure_display_ctxs)
+ DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+#endif
DRM_DEBUG_DRIVER("KMS initialized.\n");
@@ -6342,6 +6336,31 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
return 0;
}
+static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dc_link *dc_link = aconnector->dc_link;
+ struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
+ struct edid *edid;
+
+ if (!connector->edid_override)
+ return;
+
+ drm_edid_override_connector_update(&aconnector->base);
+ edid = aconnector->base.edid_blob_ptr->data;
+ aconnector->edid = edid;
+
+ /* Update emulated (virtual) sink's EDID */
+ if (dc_em_sink && dc_link) {
+ memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
+ memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
+ dm_helpers_parse_edid_caps(
+ dc_link,
+ &dc_em_sink->dc_edid,
+ &dc_em_sink->edid_caps);
+ }
+}
+
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
.reset = amdgpu_dm_connector_funcs_reset,
.detect = amdgpu_dm_connector_detect,
@@ -6352,7 +6371,8 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
.late_register = amdgpu_dm_connector_late_register,
- .early_unregister = amdgpu_dm_connector_unregister
+ .early_unregister = amdgpu_dm_connector_unregister,
+ .force = amdgpu_dm_connector_funcs_force
};
static int get_modes(struct drm_connector *connector)
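The new force callback copies the overridden EDID into the emulated sink with memmove over (extensions + 1) blocks: an EDID block is 128 bytes, and byte 126 of the base block carries the extension count. A self-contained model of that size computation and copy:

#include <stdio.h>
#include <string.h>

#define EDID_LENGTH 128 /* bytes per EDID block */

int main(void)
{
    unsigned char edid[2 * EDID_LENGTH] = {0};
    unsigned char sink_copy[2 * EDID_LENGTH];
    size_t total;

    edid[126] = 1; /* base block reports one extension block */
    total = ((size_t)edid[126] + 1) * EDID_LENGTH;

    memmove(sink_copy, edid, total); /* base block plus extensions */
    printf("copied %zu bytes\n", total);
    return 0;
}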
@@ -6369,11 +6389,20 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
struct edid *edid;
if (!aconnector->base.edid_blob_ptr) {
- DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
- aconnector->base.name);
+ /* if connector->edid_override is valid, propagate the
+ * override EDID into edid_blob_ptr
+ */
+ int count;
- aconnector->base.force = DRM_FORCE_OFF;
- return;
+ count = drm_edid_override_connector_update(&aconnector->base);
+
+ if (!aconnector->base.edid_blob_ptr) {
+ DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
+ aconnector->base.name);
+
+ aconnector->base.force = DRM_FORCE_OFF;
+ return;
+ }
}
edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
@@ -10323,7 +10352,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
input->cea_total_length = total_length;
memcpy(input->payload, data, length);
- res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
+ res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
if (!res) {
DRM_ERROR("EDID CEA parser failed\n");
return false;
@@ -10773,3 +10802,13 @@ bool check_seamless_boot_capability(struct amdgpu_device *adev)
return false;
}
+
+bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+ return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
+}
+
+bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+ return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 27711743c22c..0802f8e8fac5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -83,12 +83,15 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
}
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
-static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc)
+static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream)
{
struct drm_device *drm_dev = crtc->dev;
+ struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ bool was_activated;
spin_lock_irq(&drm_dev->event_lock);
+ was_activated = acrtc->dm_irq_params.window_param.activated;
acrtc->dm_irq_params.window_param.x_start = 0;
acrtc->dm_irq_params.window_param.y_start = 0;
acrtc->dm_irq_params.window_param.x_end = 0;
@@ -97,6 +100,14 @@ static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc)
acrtc->dm_irq_params.window_param.update_win = false;
acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
spin_unlock_irq(&drm_dev->event_lock);
+
+ /* Disable secure_display if it was enabled */
+ if (was_activated) {
+ /* stop ROI update on this crtc */
+ flush_work(&dm->secure_display_ctxs[crtc->index].notify_ta_work);
+ flush_work(&dm->secure_display_ctxs[crtc->index].forward_roi_work);
+ dc_stream_forward_crc_window(stream, NULL, true);
+ }
}
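The rewritten reset path snapshots whether the CRC window was active while holding event_lock, then performs the teardown (flush_work and disabling the forwarded window) only after the lock is dropped, since flushing work can sleep and must not run under a spinlock. A userspace model of that snapshot-then-act pattern (a pthread mutex standing in for the spinlock):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool activated = true;

int main(void)
{
    bool was_activated;

    pthread_mutex_lock(&lock);
    was_activated = activated; /* snapshot state under the lock */
    activated = false;         /* reset the window parameters */
    pthread_mutex_unlock(&lock);

    if (was_activated) {
        /* sleeping teardown work runs only after unlocking */
        printf("tearing down previously active window\n");
    }
    return 0;
}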
static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
@@ -204,9 +215,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state,
enum amdgpu_dm_pipe_crc_source source)
{
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- int i;
-#endif
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dc_stream_state *stream_state = dm_crtc_state->stream;
bool enable = amdgpu_dm_is_valid_crc_source(source);
@@ -220,19 +228,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
/* Enable or disable CRTC CRC generation */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- /* Disable secure_display if it was enabled */
- if (!enable) {
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->dm.secure_display_ctxs[i].crtc == crtc) {
- /* stop ROI update on this crtc */
- flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
- flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
- dc_stream_forward_crc_window(stream_state, NULL, true);
- }
- }
- }
-#endif
if (!dc_stream_configure_crc(stream_state->ctx->dc,
stream_state, NULL, enable, enable)) {
ret = -EINVAL;
@@ -363,7 +358,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/* Reset secure_display when we change crc source from debugfs */
- amdgpu_dm_set_crc_window_default(crtc);
+ amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
#endif
if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 935adca6f048..748e80ef40d0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -100,7 +100,7 @@ struct secure_display_context *amdgpu_dm_crtc_secure_display_create_contexts(
#else
#define amdgpu_dm_crc_window_is_activated(x)
#define amdgpu_dm_crtc_handle_crc_window_irq(x)
-#define amdgpu_dm_crtc_secure_display_create_contexts()
+#define amdgpu_dm_crtc_secure_display_create_contexts(x)
#endif
#endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index c6ce2b7123b7..09e056a64708 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -885,10 +885,34 @@ enum dc_edid_status dm_helpers_read_local_edid(
DRM_ERROR("EDID err: %d, on connector: %s",
edid_status,
aconnector->base.name);
+ if (link->aux_mode) {
+ union test_request test_request = {0};
+ union test_response test_response = {0};
- /* DP Compliance Test 4.2.2.3 */
- if (link->aux_mode)
- drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]);
+ dm_helpers_dp_read_dpcd(ctx,
+ link,
+ DP_TEST_REQUEST,
+ &test_request.raw,
+ sizeof(union test_request));
+
+ if (!test_request.bits.EDID_READ)
+ return edid_status;
+
+ test_response.bits.EDID_CHECKSUM_WRITE = 1;
+
+ dm_helpers_dp_write_dpcd(ctx,
+ link,
+ DP_TEST_EDID_CHECKSUM,
+ &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
+ 1);
+
+ dm_helpers_dp_write_dpcd(ctx,
+ link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+
+ }
return edid_status;
}
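This hunk replaces the drm helper with a direct implementation of the handshake the removed comment referenced (DP Compliance Test 4.2.2.3): read TEST_REQUEST, and only if the sink set the EDID-read bit, write the last EDID byte (the block checksum) to TEST_EDID_CHECKSUM and acknowledge through TEST_RESPONSE. A sketch with stubbed register I/O (DPCD offsets and bits per the DP specification; the I/O helpers are stand-ins):

#include <stdint.h>
#include <stdio.h>

#define DP_TEST_REQUEST             0x218
#define DP_TEST_RESPONSE            0x260
#define DP_TEST_EDID_CHECKSUM       0x261
#define DP_TEST_LINK_EDID_READ      (1 << 2)
#define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)

/* stand-ins for the DPCD read/write helpers */
static uint8_t dpcd_read(uint32_t addr)
{
    return addr == DP_TEST_REQUEST ? DP_TEST_LINK_EDID_READ : 0;
}

static void dpcd_write(uint32_t addr, uint8_t val)
{
    printf("write 0x%03x = 0x%02x\n", addr, val);
}

int main(void)
{
    uint8_t edid_checksum = 0x42; /* last byte of the EDID block */

    if (!(dpcd_read(DP_TEST_REQUEST) & DP_TEST_LINK_EDID_READ))
        return 0; /* sink did not request the EDID-read test */

    dpcd_write(DP_TEST_EDID_CHECKSUM, edid_checksum);
    dpcd_write(DP_TEST_RESPONSE, DP_TEST_EDID_CHECKSUM_WRITE);
    return 0;
}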
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 810ab682f424..46d0a8f57e55 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -45,8 +45,7 @@
#endif
#include "dc/dcn20/dcn20_resource.h"
-bool is_timing_changed(struct dc_stream_state *cur_stream,
- struct dc_stream_state *new_stream);
+
#define PEAK_FACTOR_X1000 1006
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
@@ -1422,7 +1421,7 @@ int pre_validate_dsc(struct drm_atomic_state *state,
struct dc_stream_state *stream = dm_state->context->streams[i];
if (local_dc_state->streams[i] &&
- is_timing_changed(stream, local_dc_state->streams[i])) {
+ dc_is_timing_changed(stream, local_dc_state->streams[i])) {
DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i);
} else {
int ind = find_crtc_index_in_state_by_stream(state, stream);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 1ef9e4053bb7..90a02d7bd3da 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -123,9 +123,7 @@ static void encoder_control_dmcub(
sizeof(cmd.digx_encoder_control.header);
cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig;
- dc_dmub_srv_cmd_queue(dmcub, &cmd);
- dc_dmub_srv_cmd_execute(dmcub);
- dc_dmub_srv_wait_idle(dmcub);
+ dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result encoder_control_digx_v1_5(
@@ -261,9 +259,7 @@ static void transmitter_control_dmcub(
sizeof(cmd.dig1_transmitter_control.header);
cmd.dig1_transmitter_control.transmitter_control.dig = *dig;
- dc_dmub_srv_cmd_queue(dmcub, &cmd);
- dc_dmub_srv_cmd_execute(dmcub);
- dc_dmub_srv_wait_idle(dmcub);
+ dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result transmitter_control_v1_6(
@@ -325,9 +321,7 @@ static void transmitter_control_dmcub_v1_7(
sizeof(cmd.dig1_transmitter_control.header);
cmd.dig1_transmitter_control.transmitter_control.dig_v1_7 = *dig;
- dc_dmub_srv_cmd_queue(dmcub, &cmd);
- dc_dmub_srv_cmd_execute(dmcub);
- dc_dmub_srv_wait_idle(dmcub);
+ dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result transmitter_control_v1_7(
@@ -435,9 +429,7 @@ static void set_pixel_clock_dmcub(
sizeof(cmd.set_pixel_clock.header);
cmd.set_pixel_clock.pixel_clock.clk = *clk;
- dc_dmub_srv_cmd_queue(dmcub, &cmd);
- dc_dmub_srv_cmd_execute(dmcub);
- dc_dmub_srv_wait_idle(dmcub);
+ dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result set_pixel_clock_v7(
@@ -804,9 +796,7 @@ static void enable_disp_power_gating_dmcub(
sizeof(cmd.enable_disp_power_gating.header);
cmd.enable_disp_power_gating.power_gating.pwr = *pwr;
- dc_dmub_srv_cmd_queue(dmcub, &cmd);
- dc_dmub_srv_cmd_execute(dmcub);
- dc_dmub_srv_wait_idle(dmcub);
+ dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result enable_disp_power_gating_v2_1(
@@ -1016,10 +1006,7 @@ static void enable_lvtma_control_dmcub(
panel_instance;
cmd.lvtma_control.data.bypass_panel_control_wait =
bypass_panel_control_wait;
- dc_dmub_srv_cmd_queue(dmcub, &cmd);
- dc_dmub_srv_cmd_execute(dmcub);
- dc_dmub_srv_wait_idle(dmcub);
-
+ dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static enum bp_result enable_lvtma_control(
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 694a9d3d92ae..3908e7cfd6cb 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -206,7 +206,6 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
bool force_reset = false;
bool update_uclk = false;
bool p_state_change_support;
- int total_plane_count;
if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
return;
@@ -247,8 +246,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
- p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
+ p_state_change_support = new_clocks->p_state_change_support;
// invalidate the current P-State forced min in certain dc_mode_softmax situations
if (dc->clk_mgr->dc_mode_softmax_enabled && safe_to_lower && !p_state_change_support) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
index 1fbf1c105dc1..bdbf18306698 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
@@ -312,6 +312,9 @@ void dcn30_smu_set_display_refresh_from_mall(struct clk_mgr_internal *clk_mgr, b
/* bits 8:7 for cache timer scale, bits 6:1 for cache timer delay, bit 0 = 1 for enable, = 0 for disable */
uint32_t param = (cache_timer_scale << 7) | (cache_timer_delay << 1) | (enable ? 1 : 0);
+ smu_print("SMU Set display refresh from mall: enable = %d, cache_timer_delay = %d, cache_timer_scale = %d\n",
+ enable, cache_timer_delay, cache_timer_scale);
+
dcn30_smu_send_msg_with_param(clk_mgr,
DALSMC_MSG_SetDisplayRefreshFromMall, param, NULL);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index f9e2e0c3095e..3c743cd3d3f0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -250,9 +250,7 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
index 0827c7df2855..32279c5db724 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
@@ -130,7 +130,7 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
if (result == VBIOSSMC_Result_Failed) {
if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
param == TABLE_WATERMARKS)
- DC_LOG_WARNING("Watermarks table not configured properly by SMU");
+ DC_LOG_DEBUG("Watermarks table not configured properly by SMU");
else
ASSERT(0);
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 5cb44f838bde..4d5cd59f6433 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -286,9 +286,7 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
index 0765334f0825..07baa10a8647 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
@@ -145,7 +145,7 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
if (result == VBIOSSMC_Result_Failed) {
if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
param == TABLE_WATERMARKS)
- DC_LOG_WARNING("Watermarks table not configured properly by SMU");
+ DC_LOG_DEBUG("Watermarks table not configured properly by SMU");
else if (msg_id == VBIOSSMC_MSG_SetHardMinDcfclkByFreq ||
msg_id == VBIOSSMC_MSG_SetMinDeepSleepDcfclk)
DC_LOG_WARNING("DCFCLK_DPM is not enabled by BIOS");
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index b737cbc468f5..300c6a5872d0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -234,9 +234,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 93db4dbee713..538126cefd4d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -254,9 +254,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static void dcn316_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 8d9444db092a..85e963ec25ab 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -233,6 +233,32 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
DC_FP_END();
}
+static void dcn32_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context,
+ int ref_dtbclk_khz)
+{
+ struct dccg *dccg = clk_mgr->dccg;
+ uint32_t tg_mask = 0;
+ int i;
+
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct dtbclk_dto_params dto_params = {0};
+
+ /* use mask to program DTO once per tg */
+ if (pipe_ctx->stream_res.tg &&
+ !(tg_mask & (1 << pipe_ctx->stream_res.tg->inst))) {
+ tg_mask |= (1 << pipe_ctx->stream_res.tg->inst);
+
+ dto_params.otg_inst = pipe_ctx->stream_res.tg->inst;
+ dto_params.ref_dtbclk_khz = ref_dtbclk_khz;
+
+ dccg->funcs->set_dtbclk_dto(clk_mgr->dccg, &dto_params);
+ //dccg->funcs->set_audio_dtbclk_dto(clk_mgr->dccg, &dto_params);
+ }
+ }
+}
+
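dcn32_update_clocks_update_dtb_dto walks every pipe but keeps a bitmask of timing-generator instances already visited, so a DTO shared by several pipes on one TG is programmed exactly once. A runnable model of the dedup loop:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int pipe_tg[6] = { 0, 0, 1, 2, 1, 3 }; /* pipes sharing TGs */
    uint32_t tg_mask = 0;
    int i;

    for (i = 0; i < 6; i++) {
        if (tg_mask & (1u << pipe_tg[i]))
            continue; /* this TG was already programmed */
        tg_mask |= 1u << pipe_tg[i];
        printf("program DTO for tg %d\n", pipe_tg[i]);
    }
    return 0;
}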
/* Since DPPCLK request to PMFW needs to be exact (due to DPP DTO programming),
* update DPPCLK to be the exact frequency that will be set after the DPPCLK
* divider is updated. This will prevent rounding issues that could cause DPP
@@ -433,7 +459,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
bool update_uclk = false, update_fclk = false;
bool p_state_change_support;
bool fclk_p_state_change_support;
- int total_plane_count;
if (dc->work_arounds.skip_clock_update)
return;
@@ -462,8 +487,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.fclk_prev_p_state_change_support = clk_mgr_base->clks.fclk_p_state_change_support;
- total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
- fclk_p_state_change_support = new_clocks->fclk_p_state_change_support || (total_plane_count == 0);
+ fclk_p_state_change_support = new_clocks->fclk_p_state_change_support;
if (should_update_pstate_support(safe_to_lower, fclk_p_state_change_support, clk_mgr_base->clks.fclk_p_state_change_support)) {
clk_mgr_base->clks.fclk_p_state_change_support = fclk_p_state_change_support;
@@ -502,8 +526,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
dcn32_smu_send_cab_for_uclk_message(clk_mgr, clk_mgr_base->clks.num_ways);
}
-
- p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
+ p_state_change_support = new_clocks->p_state_change_support;
if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.p_state_change_support = p_state_change_support;
@@ -570,6 +593,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
/* DCCG requires KHz precision for DTBCLK */
clk_mgr_base->clks.ref_dtbclk_khz =
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz));
+ dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
}
if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 52564b93f7eb..965b348dc8f1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -515,8 +515,7 @@ dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
cmd.secure_display.roi_info.y_end = rect->y + rect->height;
}
- dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dmub_srv);
+ dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}
static inline void
@@ -1269,7 +1268,7 @@ static void disable_vbios_mode_if_required(
if (pix_clk_100hz != requested_pix_clk_100hz) {
dc->link_srv->set_dpms_off(pipe);
- pipe->stream->dpms_off = false;
+ pipe->stream->dpms_off = true;
}
}
}
@@ -2483,9 +2482,6 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
union surface_update_flags *update_flags = &u->surface->update_flags;
- if (u->flip_addr)
- update_flags->bits.addr_update = 1;
-
if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
update_flags->raw = 0xFFFFFFFF;
return UPDATE_TYPE_FULL;
@@ -2605,8 +2601,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->mst_bw_update)
su_flags->bits.mst_bw = 1;
- if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
- su_flags->bits.crtc_timing_adjust = 1;
+
+ if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
+ (stream_update->vrr_infopacket || stream_update->allow_freesync ||
+ stream_update->vrr_active_variable))
+ su_flags->bits.fams_changed = 1;
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
@@ -3269,6 +3268,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc->hwss.prepare_bandwidth(dc, dc->current_state);
dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
}
+ } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space
+ && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ /*
+ * Workaround for a firmware issue in some receivers that do not pick up
+ * the correct output color space unless the DP link is disabled and re-enabled
+ */
+ dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
}
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
@@ -3309,7 +3315,6 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
struct dc_state *context)
{
union dmub_rb_cmd cmd;
- struct dc_context *dc_ctx = dc->ctx;
struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
unsigned int i, j;
unsigned int panel_inst = 0;
@@ -3350,8 +3355,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
update_dirty_rect->panel_inst = panel_inst;
update_dirty_rect->pipe_idx = j;
- dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}
}
}
@@ -4264,9 +4268,6 @@ void dc_set_power_state(
dc_z10_restore(dc);
- if (dc->ctx->dmub_srv)
- dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
-
dc->hwss.init_hw(dc);
if (dc->hwss.init_sys_ctx != NULL &&
@@ -4606,7 +4607,6 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
{
uint8_t action;
union dmub_rb_cmd cmd = {0};
- struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
ASSERT(payload->length <= 16);
@@ -4654,9 +4654,7 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
);
}
- dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dmub_srv);
- dc_dmub_srv_wait_idle(dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -4700,7 +4698,6 @@ bool dc_process_dmub_set_config_async(struct dc *dc,
struct dmub_notification *notify)
{
union dmub_rb_cmd cmd = {0};
- struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
bool is_cmd_complete = true;
/* prepare SET_CONFIG command */
@@ -4711,7 +4708,7 @@ bool dc_process_dmub_set_config_async(struct dc *dc,
cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
- if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
+ if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
/* command is not processed by dmub */
notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
return is_cmd_complete;
@@ -4746,7 +4743,6 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
uint8_t *mst_slots_in_use)
{
union dmub_rb_cmd cmd = {0};
- struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
/* prepare MST_ALLOC_SLOTS command */
cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
@@ -4755,7 +4751,7 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
- if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
+ if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
/* command is not processed by dmub */
return DC_ERROR_UNEXPECTED;
@@ -4789,14 +4785,11 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
uint32_t hpd_int_enable)
{
union dmub_rb_cmd cmd = {0};
- struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
- dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dmub_srv);
- dc_dmub_srv_wait_idle(dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
}
@@ -4860,21 +4853,3 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}
-
-/**
- * dc_extended_blank_supported - Decide whether extended blank is supported
- *
- * @dc: [in] Current DC state
- *
- * Extended blank is a freesync optimization feature to be enabled in the
- * future. During the extra vblank period gained from freesync, we have the
- * ability to enter z9/z10.
- *
- * Return:
- * Indicate whether extended blank is supported (%true or %false)
- */
-bool dc_extended_blank_supported(struct dc *dc)
-{
- return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
- && dc->caps.zstate_support && dc->caps.is_apu;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 117d80cb36fb..7e1e5532f88f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -69,6 +69,7 @@
#include "../dcn32/dcn32_resource.h"
#include "../dcn321/dcn321_resource.h"
+
#define DC_LOGGER_INIT(logger)
enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
@@ -1858,7 +1859,7 @@ bool dc_add_all_planes_for_stream(
return add_all_planes_for_stream(dc, stream, &set, 1, context);
}
-bool is_timing_changed(struct dc_stream_state *cur_stream,
+bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream)
{
if (cur_stream == NULL)
@@ -1883,7 +1884,7 @@ static bool are_stream_backends_same(
if (stream_a == NULL || stream_b == NULL)
return false;
- if (is_timing_changed(stream_a, stream_b))
+ if (dc_is_timing_changed(stream_a, stream_b))
return false;
if (stream_a->signal != stream_b->signal)
@@ -3508,7 +3509,7 @@ bool pipe_need_reprogram(
if (pipe_ctx_old->stream_res.stream_enc != pipe_ctx->stream_res.stream_enc)
return true;
- if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
+ if (dc_is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
return true;
if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off)
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 30f0ba05a6e6..7e3f20a3a02f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -45,7 +45,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.230"
+#define DC_VER "3.2.234"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -209,6 +209,8 @@ struct dc_color_caps {
struct dc_dmub_caps {
bool psr;
bool mclk_sw;
+ bool subvp_psr;
+ bool gecc_enable;
};
struct dc_caps {
@@ -698,6 +700,8 @@ struct dc_virtual_addr_space_config {
struct dc_bounding_box_overrides {
int sr_exit_time_ns;
int sr_enter_plus_exit_time_ns;
+ int sr_exit_z8_time_ns;
+ int sr_enter_plus_exit_z8_time_ns;
int urgent_latency_ns;
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
@@ -767,6 +771,8 @@ struct dc_debug_options {
int sr_enter_plus_exit_time_dpm0_ns;
int sr_exit_time_ns;
int sr_enter_plus_exit_time_ns;
+ int sr_exit_z8_time_ns;
+ int sr_enter_plus_exit_z8_time_ns;
int urgent_latency_ns;
uint32_t underflow_assert_delay_us;
int percent_of_ideal_drambw;
@@ -855,7 +861,6 @@ struct dc_debug_options {
bool force_usr_allow;
/* uses value at boot and disables switch */
bool disable_dtb_ref_clk_switch;
- uint32_t fixed_vs_aux_delay_config_wa;
bool extended_blank_optimization;
union aux_wake_wa_options aux_wake_wa;
uint32_t mst_start_top_delay;
@@ -879,6 +884,10 @@ struct dc_debug_options {
uint32_t fpo_vactive_margin_us;
bool disable_fpo_vactive;
bool disable_boot_optimizations;
+ bool override_odm_optimization;
+ bool minimize_dispclk_using_odm;
+ bool disable_subvp_high_refresh;
+ bool disable_dp_plus_plus_wa;
};
struct gpu_info_soc_bounding_box_v1_0;
@@ -1502,6 +1511,7 @@ struct dc_link {
/* Forced DPIA into TBT3 compatibility mode. */
bool dpia_forced_tbt3_mode;
bool dongle_mode_timing_override;
+ bool blank_stream_on_ocs_change;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -2126,8 +2136,6 @@ struct dc_sink_init_data {
bool converter_disable_audio;
};
-bool dc_extended_blank_supported(struct dc *dc);
-
struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
/* Newer interfaces */
@@ -2226,4 +2234,7 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
/* Disable acc mode Interfaces */
void dc_disable_accelerated_mode(struct dc *dc);
+bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
+ struct dc_stream_state *new_stream);
+
#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index a9b9490a532c..0319a30f2d5c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -65,47 +65,6 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
}
}
-void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
- union dmub_rb_cmd *cmd)
-{
- struct dmub_srv *dmub = dc_dmub_srv->dmub;
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
- enum dmub_status status;
-
- status = dmub_srv_cmd_queue(dmub, cmd);
- if (status == DMUB_STATUS_OK)
- return;
-
- if (status != DMUB_STATUS_QUEUE_FULL)
- goto error;
-
- /* Execute and wait for queue to become empty again. */
- dc_dmub_srv_cmd_execute(dc_dmub_srv);
- dc_dmub_srv_wait_idle(dc_dmub_srv);
-
- /* Requeue the command. */
- status = dmub_srv_cmd_queue(dmub, cmd);
- if (status == DMUB_STATUS_OK)
- return;
-
-error:
- DC_ERROR("Error queuing DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
-}
-
-void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv)
-{
- struct dmub_srv *dmub = dc_dmub_srv->dmub;
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
- enum dmub_status status;
-
- status = dmub_srv_cmd_execute(dmub);
- if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error starting DMUB execution: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
- }
-}
-
void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
{
struct dmub_srv *dmub = dc_dmub_srv->dmub;
@@ -159,50 +118,88 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
}
}
-bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd)
+bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+ return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type);
+}
+
+bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
{
+ struct dc_context *dc_ctx;
struct dmub_srv *dmub;
enum dmub_status status;
+ int i;
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return false;
+ dc_ctx = dc_dmub_srv->ctx;
dmub = dc_dmub_srv->dmub;
- status = dmub_srv_cmd_with_reply_data(dmub, cmd);
+ for (i = 0 ; i < count; i++) {
+ // Queue command
+ status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+
+ if (status == DMUB_STATUS_QUEUE_FULL) {
+ /* Execute and wait for queue to become empty again. */
+ dmub_srv_cmd_execute(dmub);
+ dmub_srv_wait_for_idle(dmub, 100000);
+
+ /* Requeue the command. */
+ status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+ }
+
+ if (status != DMUB_STATUS_OK) {
+ DC_ERROR("Error queueing DMUB command: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ return false;
+ }
+ }
+
+ status = dmub_srv_cmd_execute(dmub);
if (status != DMUB_STATUS_OK) {
- DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
+ DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
return false;
}
+ // Wait for DMUB to process command
+ if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+
+ if (status != DMUB_STATUS_OK) {
+ DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
+ return false;
+ }
+
+ // Copy data back from ring buffer into command
+ if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
+ dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
+ }
+
return true;
}
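dc_dmub_srv_cmd_run is now a thin wrapper over the list variant, which queues each command (retrying once after draining a full ring), kicks execution, and optionally waits for idle and copies reply data back. A userspace model of that control flow (toy ring state; the real operations are the dmub_srv_* calls above):

#include <stdbool.h>
#include <stdio.h>

enum wait_type { NO_WAIT, WAIT, WAIT_WITH_REPLY };

static int queue_depth, queue_cap = 2; /* toy ring state */

static bool queue_cmd(int cmd)
{
    (void)cmd; /* the toy model ignores the payload */
    if (queue_depth == queue_cap)
        return false;
    queue_depth++;
    return true;
}

static void execute_and_drain(void) { queue_depth = 0; }

static bool run_list(const int *cmds, int count, enum wait_type wt)
{
    int i;

    for (i = 0; i < count; i++) {
        if (!queue_cmd(cmds[i])) {
            execute_and_drain(); /* flush a full ring, then requeue once */
            if (!queue_cmd(cmds[i]))
                return false;
        }
    }
    execute_and_drain(); /* kick execution of everything queued */
    if (wt != NO_WAIT) {
        /* wait for idle; WAIT_WITH_REPLY would also copy ring data back */
    }
    return true;
}

int main(void)
{
    int cmds[3] = { 1, 2, 3 };

    printf("run_list: %s\n", run_list(cmds, 3, WAIT) ? "ok" : "fail");
    return 0;
}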
-void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv)
+bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
{
- struct dmub_srv *dmub = dc_dmub_srv->dmub;
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dmub_srv *dmub;
+ struct dc_context *dc_ctx;
+ union dmub_fw_boot_status boot_status;
enum dmub_status status;
- for (;;) {
- /* Wait up to a second for PHY init. */
- status = dmub_srv_wait_for_phy_init(dmub, 1000000);
- if (status == DMUB_STATUS_OK)
- /* Initialization OK */
- break;
-
- DC_ERROR("DMCUB PHY init failed: status=%d\n", status);
- ASSERT(0);
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return false;
- if (status != DMUB_STATUS_TIMEOUT)
- /*
- * Server likely initialized or we don't have
- * DMCUB HW support - this won't end.
- */
- break;
+ dmub = dc_dmub_srv->dmub;
+ dc_ctx = dc_dmub_srv->ctx;
- /* Continue spinning so we don't hang the ASIC. */
+ status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
+ if (status != DMUB_STATUS_OK) {
+ DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
+ return false;
}
+
+ return boot_status.bits.optimized_init_done;
}
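In place of the removed PHY-init busy-wait, dc_dmub_srv_optimized_init_done queries the firmware boot-status word and decodes it through a bitfield union, so a single bit answers whether optimized init completed. A sketch of that decode (the bit layout here is illustrative, not the firmware's actual register format):

#include <stdint.h>
#include <stdio.h>

union fw_boot_status {
    uint32_t raw;
    struct {
        uint32_t dal_fw : 1;
        uint32_t optimized_init_done : 1;
        uint32_t reserved : 30;
    } bits;
};

int main(void)
{
    union fw_boot_status st = { .raw = 0x2 }; /* pretend register read */

    printf("optimized init done: %s\n",
           st.bits.optimized_init_done ? "yes" : "no");
    return 0;
}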
bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
@@ -267,9 +264,7 @@ void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal
cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
// Send the command to the DMCUB.
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
@@ -283,9 +278,7 @@ void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
// Send the command to the DMCUB.
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
@@ -378,21 +371,14 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);
// Send the command to the DMCUB.
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
-void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub)
+void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
{
union dmub_rb_cmd cmd = { 0 };
- enum dmub_status status;
-
- if (!dmub) {
- return;
- }
memset(&cmd, 0, sizeof(cmd));
@@ -402,15 +388,10 @@ void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub)
cmd.query_feature_caps.header.ret_status = 1;
cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data);
- /* Send command to fw */
- status = dmub_srv_cmd_with_reply_data(dmub, &cmd);
-
- ASSERT(status == DMUB_STATUS_OK);
-
/* If command was processed, copy feature caps to dmub srv */
- if (status == DMUB_STATUS_OK &&
+ if (dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
cmd.query_feature_caps.header.ret_status == 0) {
- memcpy(&dmub->feature_caps,
+ memcpy(&dc_dmub_srv->dmub->feature_caps,
&cmd.query_feature_caps.query_feature_caps_data,
sizeof(struct dmub_feature_caps));
}
@@ -419,7 +400,6 @@ void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub)
void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
union dmub_rb_cmd cmd = { 0 };
- enum dmub_status status;
unsigned int panel_inst = 0;
dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst);
@@ -433,13 +413,8 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data);
cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;
- // Send command to fw
- status = dmub_srv_cmd_with_reply_data(dc->ctx->dmub_srv->dmub, &cmd);
-
- ASSERT(status == DMUB_STATUS_OK);
-
// If command was processed, copy feature caps to dmub srv
- if (status == DMUB_STATUS_OK &&
+ if (dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
cmd.visual_confirm_color.header.ret_status == 0) {
memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
&cmd.visual_confirm_color.visual_confirm_color_data,
@@ -797,9 +772,8 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
}
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
@@ -982,14 +956,6 @@ static void dc_build_cursor_update_payload0(
payload->panel_inst = panel_inst;
}
-static void dc_send_cmd_to_dmu(struct dc_dmub_srv *dmub_srv,
- union dmub_rb_cmd *cmd)
-{
- dc_dmub_srv_cmd_queue(dmub_srv, cmd);
- dc_dmub_srv_cmd_execute(dmub_srv);
- dc_dmub_srv_wait_idle(dmub_srv);
-}
-
static void dc_build_cursor_position_update_payload0(
struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
const struct hubp *hubp, const struct dpp *dpp)
@@ -1032,9 +998,11 @@ static void dc_build_cursor_attribute_update_payload1(
void dc_send_update_cursor_info_to_dmu(
struct pipe_ctx *pCtx, uint8_t pipe_idx)
{
- union dmub_rb_cmd cmd = { 0 };
- union dmub_cmd_update_cursor_info_data *update_cursor_info =
- &cmd.update_cursor_info.update_cursor_info_data;
+ union dmub_rb_cmd cmd[2];
+ union dmub_cmd_update_cursor_info_data *update_cursor_info_0 =
+ &cmd[0].update_cursor_info.update_cursor_info_data;
+
+ memset(cmd, 0, sizeof(cmd));
if (!dc_dmub_should_update_cursor_data(pCtx))
return;
@@ -1051,31 +1019,28 @@ void dc_send_update_cursor_info_to_dmu(
{
/* Build Payload#0 Header */
- cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
- cmd.update_cursor_info.header.payload_bytes =
- sizeof(cmd.update_cursor_info.update_cursor_info_data);
- cmd.update_cursor_info.header.multi_cmd_pending = 1; /* To combine multi dmu cmd, 1st cmd */
+ cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
+ cmd[0].update_cursor_info.header.payload_bytes =
+ sizeof(cmd[0].update_cursor_info.update_cursor_info_data);
+ cmd[0].update_cursor_info.header.multi_cmd_pending = 1; /* first of the chained commands; more follow */
/* Prepare Payload */
- dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info->payload0);
+ dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0);
- dc_build_cursor_position_update_payload0(&update_cursor_info->payload0, pipe_idx,
+ dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx,
pCtx->plane_res.hubp, pCtx->plane_res.dpp);
- /* Send update_curosr_info to queue */
- dc_dmub_srv_cmd_queue(pCtx->stream->ctx->dmub_srv, &cmd);
- }
+ }
{
/* Build Payload#1 Header */
- memset(update_cursor_info, 0, sizeof(union dmub_cmd_update_cursor_info_data));
- cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
- cmd.update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
- cmd.update_cursor_info.header.multi_cmd_pending = 0; /* Indicate it's the last command. */
+ cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
+ cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
+ cmd[1].update_cursor_info.header.multi_cmd_pending = 0; /* last command in the chain */
dc_build_cursor_attribute_update_payload1(
- &cmd.update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
+ &cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);
/* Send the chained update_cursor_info commands to the DMUB */
- dc_send_cmd_to_dmu(pCtx->stream->ctx->dmub_srv, &cmd);
+ dm_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
}
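
The two-entry array above is the template for chained submissions: every command except the last sets multi_cmd_pending, and the whole chain is handed to the firmware in one call. A generalized sketch, assuming the caller already holds a dc_context pointer (ctx and the payload fill are illustrative):

	enum { CHAIN_LEN = 2 };
	union dmub_rb_cmd chain[CHAIN_LEN];
	int i;

	memset(chain, 0, sizeof(chain));
	for (i = 0; i < CHAIN_LEN; i++) {
		chain[i].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		/* 1 on all but the last entry: more of this update follows. */
		chain[i].update_cursor_info.header.multi_cmd_pending = (i != CHAIN_LEN - 1);
		/* ... per-command payload ... */
	}
	dm_execute_dmub_cmd_list(ctx, CHAIN_LEN, chain, DM_DMUB_WAIT_TYPE_WAIT);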
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index d34f5563df2e..a5196a9292b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -26,7 +26,7 @@
#ifndef _DMUB_DC_SRV_H_
#define _DMUB_DC_SRV_H_
-#include "os_types.h"
+#include "dm_services_types.h"
#include "dmub/dmub_srv.h"
struct dmub_srv;
@@ -52,16 +52,13 @@ struct dc_dmub_srv {
void *dm;
};
-void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
- union dmub_rb_cmd *cmd);
-
-void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv);
-
void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
-void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv);
+bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv);
+
+bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
-bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd);
+bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type);
bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
unsigned int stream_mask);
@@ -77,7 +74,7 @@ void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal
void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst);
bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool enable_pstate, struct dc_state *context);
-void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub);
+void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv);
void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv);
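
dc_dmub_srv_cmd_run() and dc_dmub_srv_cmd_run_list() are the DC-side entry points behind the dm_execute_dmub_cmd() calls used throughout this series. A plausible minimal sketch of the OS-dependent wrappers, assuming they simply forward to these helpers (the real DM-layer implementation is outside this hunk):

bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
			 enum dm_dmub_wait_type wait_type)
{
	/* Forward to the DC-side runner introduced above. */
	return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
}

bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
			      union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
}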
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 49aab1924665..4a7f6497dc5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -566,6 +566,12 @@ struct dpcd_amd_device_id {
uint8_t dal_version_byte2;
};
+struct target_luminance_value {
+ uint8_t byte0;
+ uint8_t byte1;
+ uint8_t byte2;
+};
+
struct dpcd_source_backlight_set {
struct {
uint8_t byte0;
@@ -1225,6 +1231,7 @@ struct dpcd_caps {
union dp_main_line_channel_coding_cap channel_coding_cap;
union dp_sink_video_fallback_formats fallback_formats;
union dp_fec_capability1 fec_cap1;
+ bool panel_luminance_control;
union dp_cable_id cable_id;
uint8_t edp_rev;
union edp_alpm_caps alpm_caps;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 0e92a322c2ed..9491b76d61f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -58,6 +58,7 @@ struct dc_dsc_config_options {
uint32_t dsc_min_slice_height_override;
uint32_t max_target_bpp_limit_override_x16;
uint32_t slice_height_granularity;
+ uint32_t dsc_force_odm_hslice_override;
};
bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index f43cce16bb6c..a21948267c0f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -41,19 +41,13 @@ static inline void submit_dmub_read_modify_write(
const struct dc_context *ctx)
{
struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
- bool gather = false;
offload->should_burst_write =
(offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
cmd_buf->header.payload_bytes =
sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;
- gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
- ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
-
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
-
- ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
+ dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
memset(cmd_buf, 0, sizeof(*cmd_buf));
@@ -66,17 +60,11 @@ static inline void submit_dmub_burst_write(
const struct dc_context *ctx)
{
struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
- bool gather = false;
cmd_buf->header.payload_bytes =
sizeof(uint32_t) * offload->reg_seq_count;
- gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
- ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
-
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
-
- ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
+ dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
memset(cmd_buf, 0, sizeof(*cmd_buf));
@@ -88,17 +76,11 @@ static inline void submit_dmub_reg_wait(
const struct dc_context *ctx)
{
struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
- bool gather = false;
- gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
- ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
-
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
+ dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT);
memset(cmd_buf, 0, sizeof(*cmd_buf));
offload->reg_seq_count = 0;
-
- ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
}
struct dc_reg_value_masks {
@@ -151,7 +133,6 @@ static void dmub_flush_buffer_execute(
const struct dc_context *ctx)
{
submit_dmub_read_modify_write(offload, ctx);
- dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}
static void dmub_flush_burst_write_buffer_execute(
@@ -159,7 +140,6 @@ static void dmub_flush_burst_write_buffer_execute(
const struct dc_context *ctx)
{
submit_dmub_burst_write(offload, ctx);
- dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}
static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
@@ -691,8 +671,6 @@ void reg_sequence_start_execute(const struct dc_context *ctx)
default:
return;
}
-
- dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 25284006019c..0add5ecc895f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -131,6 +131,7 @@ union stream_update_flags {
uint32_t dsc_changed : 1;
uint32_t mst_bw : 1;
uint32_t crtc_timing_adjust : 1;
+ uint32_t fams_changed : 1;
} bits;
uint32_t raw;
@@ -171,6 +172,10 @@ struct mall_temp_config {
bool is_phantom_plane[MAX_PIPES];
};
+struct dc_stream_debug_options {
+ char force_odm_combine_segments;
+};
+
struct dc_stream_state {
// sink is deprecated, new code should not reference
// this pointer
@@ -181,6 +186,7 @@ struct dc_stream_state {
* a stream via the volatile dc_state rather than the static dc_link.
*/
struct link_encoder *link_enc;
+ struct dc_stream_debug_options debug;
struct dc_panel_patch sink_patches;
union display_content_support content_support;
struct dc_crtc_timing timing;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 45ab48fe5d00..150c19286d67 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -83,7 +83,7 @@ struct dc_perf_trace {
unsigned long last_entry_write;
};
-#define MAX_SURFACE_NUM 4
+#define MAX_SURFACE_NUM 6
#define NUM_PIXEL_FORMATS 10
enum tiling_mode {
@@ -196,6 +196,7 @@ struct dc_panel_patch {
unsigned int disable_fams;
unsigned int skip_avmute;
unsigned int mst_start_top_delay;
+ unsigned int delay_disable_aux_intercept_ms;
};
struct dc_edid_caps {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index 0d7db132a20f..01490c9ba958 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -29,7 +29,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
-dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dce_panel_cntl.o \
+dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dmub_abm_lcd.o dce_panel_cntl.o \
dmub_hw_lock_mgr.o dmub_outbox.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index 9fc48208c2e4..a66f83a61402 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -24,212 +24,151 @@
*/
#include "dmub_abm.h"
-#include "dce_abm.h"
+#include "dmub_abm_lcd.h"
#include "dc.h"
-#include "dc_dmub_srv.h"
-#include "dmub/dmub_srv.h"
#include "core_types.h"
-#include "dm_services.h"
-#include "reg_helper.h"
-#include "fixed31_32.h"
-
-#include "atom.h"
#define TO_DMUB_ABM(abm)\
container_of(abm, struct dce_abm, base)
-#define REG(reg) \
- (dce_abm->regs->reg)
+#define ABM_FEATURE_NO_SUPPORT 0
+#define ABM_LCD_SUPPORT 1
-#undef FN
-#define FN(reg_name, field_name) \
- dce_abm->abm_shift->field_name, dce_abm->abm_mask->field_name
+static unsigned int abm_feature_support(struct abm *abm, unsigned int panel_inst)
+{
+ struct dc_context *dc = abm->ctx;
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int i;
+ int edp_num;
+ unsigned int ret = ABM_FEATURE_NO_SUPPORT;
-#define CTX \
- dce_abm->base.ctx
+ dc_get_edp_links(dc->dc, edp_links, &edp_num);
-#define DISABLE_ABM_IMMEDIATELY 255
+ for (i = 0; i < edp_num; i++) {
+ if (panel_inst == i)
+ break;
+ }
+ if (i < edp_num) {
+ ret = ABM_LCD_SUPPORT;
+ }
+ return ret;
+}
-static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
+static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight)
{
- union dmub_rb_cmd cmd;
- uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 1 : 0;
- uint32_t edp_id_count = dc->dc_edp_id_count;
- int i;
- uint8_t panel_mask = 0;
-
- for (i = 0; i < edp_id_count; i++)
- panel_mask |= 0x01 << i;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM;
- cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC;
- cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm;
- cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
- cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask;
- cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data);
-
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dmub_abm_init(abm, backlight);
}
-static void dmub_abm_init(struct abm *abm, uint32_t backlight)
+static unsigned int dmub_abm_get_current_backlight_ex(struct abm *abm)
{
- struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
-
- REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x3);
- REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x1);
- REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x3);
- REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x1);
- REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x1);
-
- REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0,
- ABM1_HG_NUM_OF_BINS_SEL, 0,
- ABM1_HG_VMAX_SEL, 1,
- ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0);
-
- REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0,
- ABM1_IPCSC_COEFF_SEL_R, 2,
- ABM1_IPCSC_COEFF_SEL_G, 4,
- ABM1_IPCSC_COEFF_SEL_B, 2);
+ return dmub_abm_get_current_backlight(abm);
+}
- REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL,
- BL1_PWM_CURRENT_ABM_LEVEL, backlight);
+static unsigned int dmub_abm_get_target_backlight_ex(struct abm *abm)
+{
+ return dmub_abm_get_target_backlight(abm);
+}
- REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL,
- BL1_PWM_TARGET_ABM_LEVEL, backlight);
+static bool dmub_abm_set_level_ex(struct abm *abm, uint32_t level)
+{
+ bool ret = false;
+ unsigned int feature_support, i;
+ uint8_t panel_mask0 = 0;
- REG_UPDATE(BL1_PWM_USER_LEVEL,
- BL1_PWM_USER_LEVEL, backlight);
+ for (i = 0; i < MAX_NUM_EDP; i++) {
+ feature_support = abm_feature_support(abm, i);
- REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
- ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
- ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000);
+ if (feature_support == ABM_LCD_SUPPORT)
+ panel_mask0 |= (0x01 << i);
+ }
- REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0,
- ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1,
- ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1,
- ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
+ if (panel_mask0)
+ ret = dmub_abm_set_level(abm, level, panel_mask0);
- dmub_abm_enable_fractional_pwm(abm->ctx);
+ return ret;
}
-static unsigned int dmub_abm_get_current_backlight(struct abm *abm)
+static bool dmub_abm_init_config_ex(struct abm *abm,
+ const char *src,
+ unsigned int bytes,
+ unsigned int inst)
{
- struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
- unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
+ unsigned int feature_support;
- /* return backlight in hardware format which is unsigned 17 bits, with
- * 1 bit integer and 16 bit fractional
- */
- return backlight;
-}
+ feature_support = abm_feature_support(abm, inst);
-static unsigned int dmub_abm_get_target_backlight(struct abm *abm)
-{
- struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
- unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL);
+ if (feature_support == ABM_LCD_SUPPORT)
+ dmub_abm_init_config(abm, src, bytes, inst);
- /* return backlight in hardware format which is unsigned 17 bits, with
- * 1 bit integer and 16 bit fractional
- */
- return backlight;
+ return true;
}
-static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
+static bool dmub_abm_set_pause_ex(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst)
{
- union dmub_rb_cmd cmd;
- struct dc_context *dc = abm->ctx;
- struct dc_link *edp_links[MAX_NUM_EDP];
- int i;
- int edp_num;
- uint8_t panel_mask = 0;
+ bool ret = false;
+ unsigned int feature_support;
- dc_get_edp_links(dc->dc, edp_links, &edp_num);
-
- for (i = 0; i < edp_num; i++) {
- if (edp_links[i]->link_status.link_active)
- panel_mask |= (0x01 << i);
- }
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.abm_set_level.header.type = DMUB_CMD__ABM;
- cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL;
- cmd.abm_set_level.abm_set_level_data.level = level;
- cmd.abm_set_level.abm_set_level_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
- cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask;
- cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data);
+ feature_support = abm_feature_support(abm, panel_inst);
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ if (feature_support == ABM_LCD_SUPPORT)
+ ret = dmub_abm_set_pause(abm, pause, panel_inst, stream_inst);
- return true;
+ return ret;
}
-static bool dmub_abm_init_config(struct abm *abm,
- const char *src,
- unsigned int bytes,
- unsigned int inst)
+static bool dmub_abm_set_pipe_ex(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
{
- union dmub_rb_cmd cmd;
- struct dc_context *dc = abm->ctx;
- uint8_t panel_mask = 0x01 << inst;
+ bool ret = false;
+ unsigned int feature_support;
- // TODO: Optimize by only reading back final 4 bytes
- dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb);
+ feature_support = abm_feature_support(abm, panel_inst);
- // Copy iramtable into cw7
- memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes);
+ if (feature_support == ABM_LCD_SUPPORT)
+ ret = dmub_abm_set_pipe(abm, otg_inst, option, panel_inst);
- memset(&cmd, 0, sizeof(cmd));
- // Fw will copy from cw7 to fw_state
- cmd.abm_init_config.header.type = DMUB_CMD__ABM;
- cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG;
- cmd.abm_init_config.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
- cmd.abm_init_config.abm_init_config_data.bytes = bytes;
- cmd.abm_init_config.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
- cmd.abm_init_config.abm_init_config_data.panel_mask = panel_mask;
+ return ret;
+}
- cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data);
+static bool dmub_abm_set_event_ex(struct abm *abm, unsigned int full_screen, unsigned int video_mode,
+ unsigned int hdr_mode, unsigned int panel_inst)
+{
+ bool ret = false;
+ unsigned int feature_support;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ feature_support = abm_feature_support(abm, panel_inst);
- return true;
+ return ret;
}
-static bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst)
+static bool dmub_abm_set_backlight_level_pwm_ex(struct abm *abm,
+ unsigned int backlight_pwm_u16_16,
+ unsigned int frame_ramp,
+ unsigned int controller_id,
+ unsigned int panel_inst)
{
- union dmub_rb_cmd cmd;
- struct dc_context *dc = abm->ctx;
- uint8_t panel_mask = 0x01 << panel_inst;
+ bool ret = false;
+ unsigned int feature_support;
- memset(&cmd, 0, sizeof(cmd));
- cmd.abm_pause.header.type = DMUB_CMD__ABM;
- cmd.abm_pause.header.sub_type = DMUB_CMD__ABM_PAUSE;
- cmd.abm_pause.abm_pause_data.enable = pause;
- cmd.abm_pause.abm_pause_data.panel_mask = panel_mask;
- cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data);
+ feature_support = abm_feature_support(abm, panel_inst);
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ if (feature_support == ABM_LCD_SUPPORT)
+ ret = dmub_abm_set_backlight_level(abm, backlight_pwm_u16_16, frame_ramp, panel_inst);
- return true;
+ return ret;
}
static const struct abm_funcs abm_funcs = {
- .abm_init = dmub_abm_init,
- .set_abm_level = dmub_abm_set_level,
- .get_current_backlight = dmub_abm_get_current_backlight,
- .get_target_backlight = dmub_abm_get_target_backlight,
- .init_abm_config = dmub_abm_init_config,
- .set_abm_pause = dmub_abm_set_pause,
+ .abm_init = dmub_abm_init_ex,
+ .set_abm_level = dmub_abm_set_level_ex,
+ .get_current_backlight = dmub_abm_get_current_backlight_ex,
+ .get_target_backlight = dmub_abm_get_target_backlight_ex,
+ .init_abm_config = dmub_abm_init_config_ex,
+ .set_abm_pause = dmub_abm_set_pause_ex,
+ .set_pipe_ex = dmub_abm_set_pipe_ex,
+ .set_abm_event = dmub_abm_set_event_ex,
+ .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm_ex,
};
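
Every hook in this table is now a thin _ex wrapper that gates the shared LCD helper on abm_feature_support() for the target panel. Callers are expected to prefer the hook and fall back to the direct helper, as the dcn21 hwseq hunks later in this patch do; the dispatch shape is:

	/* Prefer the per-panel hook when present, else the shared helper. */
	if (abm->funcs && abm->funcs->set_pipe_ex)
		abm->funcs->set_pipe_ex(abm, otg_inst, option, panel_inst);
	else
		dmub_abm_set_pipe(abm, otg_inst, option, panel_inst);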
static void dmub_abm_construct(
@@ -256,16 +195,19 @@ struct abm *dmub_abm_create(
const struct dce_abm_shift *abm_shift,
const struct dce_abm_mask *abm_mask)
{
- struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
+ if (ctx->dc->caps.dmcub_support) {
+ struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
- if (abm_dce == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
+ if (abm_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
- dmub_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask);
+ dmub_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask);
- return &abm_dce->base;
+ return &abm_dce->base;
+ }
+ return NULL;
}
void dmub_abm_destroy(struct abm **abm)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
new file mode 100644
index 000000000000..39da73eba86e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dmub_abm.h"
+#include "dmub_abm_lcd.h"
+#include "dce_abm.h"
+#include "dc.h"
+#include "dc_dmub_srv.h"
+#include "dmub/dmub_srv.h"
+#include "core_types.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed31_32.h"
+
+#ifdef _WIN32
+#include "atombios.h"
+#else
+#include "atom.h"
+#endif
+
+#define TO_DMUB_ABM(abm)\
+ container_of(abm, struct dce_abm, base)
+
+#define REG(reg) \
+ (dce_abm->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dce_abm->abm_shift->field_name, dce_abm->abm_mask->field_name
+
+#define CTX \
+ dce_abm->base.ctx
+
+#define DISABLE_ABM_IMMEDIATELY 255
+
+static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
+{
+ union dmub_rb_cmd cmd;
+ uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 1 : 0;
+ uint32_t edp_id_count = dc->dc_edp_id_count;
+ int i;
+ uint8_t panel_mask = 0;
+
+ for (i = 0; i < edp_id_count; i++)
+ panel_mask |= 0x01 << i;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC;
+ cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm;
+ cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
+ cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask;
+ cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+void dmub_abm_init(struct abm *abm, uint32_t backlight)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+
+ REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x3);
+ REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x1);
+ REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x3);
+ REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x1);
+ REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x1);
+
+ REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0,
+ ABM1_HG_NUM_OF_BINS_SEL, 0,
+ ABM1_HG_VMAX_SEL, 1,
+ ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0);
+
+ REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0,
+ ABM1_IPCSC_COEFF_SEL_R, 2,
+ ABM1_IPCSC_COEFF_SEL_G, 4,
+ ABM1_IPCSC_COEFF_SEL_B, 2);
+
+ REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL,
+ BL1_PWM_CURRENT_ABM_LEVEL, backlight);
+
+ REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL,
+ BL1_PWM_TARGET_ABM_LEVEL, backlight);
+
+ REG_UPDATE(BL1_PWM_USER_LEVEL,
+ BL1_PWM_USER_LEVEL, backlight);
+
+ REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
+ ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
+ ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000);
+
+ REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0,
+ ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1,
+ ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1,
+ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
+
+ dmub_abm_enable_fractional_pwm(abm->ctx);
+}
+
+unsigned int dmub_abm_get_current_backlight(struct abm *abm)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+ unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
+
+ /* return backlight in hardware format which is unsigned 17 bits, with
+ * 1 bit integer and 16 bit fractional
+ */
+ return backlight;
+}
+
+unsigned int dmub_abm_get_target_backlight(struct abm *abm)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+ unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL);
+
+ /* return backlight in hardware format which is unsigned 17 bits, with
+ * 1 bit integer and 16 bit fractional
+ */
+ return backlight;
+}
+
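
Both getters return the raw hardware format described above: unsigned 17-bit 1.16 fixed point, so 0x10000 represents 100%. A sketch for consumers that want a plain percentage (the helper name is illustrative, not part of this patch):

static unsigned int backlight_hw_to_percent(unsigned int bl_u1_16)
{
	/* Scale first so the fractional bits survive, then shift them out:
	 * (0x10000 * 100) >> 16 == 100.
	 */
	return (bl_u1_16 * 100) >> 16;
}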
+bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_set_level.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL;
+ cmd.abm_set_level.abm_set_level_data.level = level;
+ cmd.abm_set_level.abm_set_level_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
+ cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask;
+ cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
+
+void dmub_abm_init_config(struct abm *abm,
+ const char *src,
+ unsigned int bytes,
+ unsigned int inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+ uint8_t panel_mask = 0x01 << inst;
+
+ // TODO: Optimize by only reading back final 4 bytes
+ dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb);
+
+ // Copy iramtable into cw7
+ memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes);
+
+ memset(&cmd, 0, sizeof(cmd));
+ // Fw will copy from cw7 to fw_state
+ cmd.abm_init_config.header.type = DMUB_CMD__ABM;
+ cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG;
+ cmd.abm_init_config.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
+ cmd.abm_init_config.abm_init_config_data.bytes = bytes;
+ cmd.abm_init_config.abm_init_config_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
+ cmd.abm_init_config.abm_init_config_data.panel_mask = panel_mask;
+
+ cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+ uint8_t panel_mask = 0x01 << panel_inst;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_pause.header.type = DMUB_CMD__ABM;
+ cmd.abm_pause.header.sub_type = DMUB_CMD__ABM_PAUSE;
+ cmd.abm_pause.abm_pause_data.enable = pause;
+ cmd.abm_pause.abm_pause_data.panel_mask = panel_mask;
+ cmd.abm_pause.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
+
+bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+ uint32_t ramping_boundary = 0xFFFF;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
+ cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.set_pipe_option = option;
+ cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
+ cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
+
+bool dmub_abm_set_backlight_level(struct abm *abm,
+ unsigned int backlight_pwm_u16_16,
+ unsigned int frame_ramp,
+ unsigned int panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
+ cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
+ cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16;
+ cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
+ cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst);
+ cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
+
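
Panel selection throughout this new file is a bitmask with one bit per eDP panel instance, built as 0x01 << inst. A usage sketch (abm and level assumed in scope):

	/* Target eDP panel instances 0 and 2 with a single ABM command. */
	uint8_t panel_mask = (0x01 << 0) | (0x01 << 2);

	dmub_abm_set_level(abm, level, panel_mask);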
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
new file mode 100644
index 000000000000..00b4e268768e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DMUB_ABM_LCD_H__
+#define __DMUB_ABM_LCD_H__
+
+#include "abm.h"
+
+void dmub_abm_init(struct abm *abm, uint32_t backlight);
+bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask);
+unsigned int dmub_abm_get_current_backlight(struct abm *abm);
+unsigned int dmub_abm_get_target_backlight(struct abm *abm);
+void dmub_abm_init_config(struct abm *abm,
+ const char *src,
+ unsigned int bytes,
+ unsigned int inst);
+
+bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst);
+bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t option, uint32_t panel_inst);
+bool dmub_abm_set_backlight_level(struct abm *abm,
+ unsigned int backlight_pwm_u16_16,
+ unsigned int frame_ramp,
+ unsigned int panel_inst);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index 3f32e9c3fbaf..2aa0e01a6891 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -47,9 +47,7 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
if (!lock)
cmd.lock_hw.lock_hw_data.should_release = 1;
- dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dmub_srv);
- dc_dmub_srv_wait_idle(dmub_srv);
+ dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
index fff1d07d865d..d8009b2dc56a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
@@ -48,7 +48,5 @@ void dmub_enable_outbox_notification(struct dc_dmub_srv *dmub_srv)
sizeof(cmd.outbox1_enable.header);
cmd.outbox1_enable.enable = true;
- dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dmub_srv);
- dc_dmub_srv_wait_idle(dmub_srv);
+ dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 9705d8f88382..4000a834592c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -168,9 +168,7 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst;
cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -198,9 +196,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait, uint8
cmd.psr_enable.header.payload_bytes = 0; // Send header only
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dm_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
/* Below loops 1000 x 500us = 500 ms.
* Exit PSR may need to wait 1-2 frames to power up. Timeout after at
@@ -248,9 +244,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
cmd.psr_set_level.psr_set_level_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
cmd.psr_set_level.psr_set_level_data.panel_inst = panel_inst;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
/*
@@ -269,9 +263,7 @@ static void dmub_psr_set_sink_vtotal_in_psr_active(struct dmub_psr *dmub,
cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_idle = psr_vtotal_idle;
cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_su = psr_vtotal_su;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
/*
@@ -290,9 +282,7 @@ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt
cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt;
cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
/*
@@ -422,9 +412,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->relock_delay_frame_cnt = 2;
copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -445,9 +433,7 @@ static void dmub_psr_force_static(struct dmub_psr *dmub, uint8_t panel_inst)
cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC;
cmd.psr_enable.header.payload_bytes = 0;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 1c3b6f25a782..a7ad1d7bc43e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1524,7 +1524,9 @@ void dcn10_init_hw(struct dc *dc)
if (!dcb->funcs->is_accelerated_mode(dcb))
hws->funcs.disable_vga(dc->hwseq);
- hws->funcs.bios_golden_init(dc);
+ if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
+ hws->funcs.bios_golden_init(dc);
+
if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 24bd93219936..0ddd310cc971 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -623,6 +623,11 @@ void hubbub2_read_state(struct hubbub *hubbub, struct dcn_hubbub_state *hubbub_s
REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, &hubbub_state->vm_error_vmid);
REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, &hubbub_state->vm_error_pipe);
}
+
+ if (REG(DCHUBBUB_TEST_DEBUG_INDEX) && REG(DCHUBBUB_TEST_DEBUG_DATA)) {
+ REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, 0x6);
+ hubbub_state->test_debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
+ }
}
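
The new block is the classic index/data debug-bus pattern: writing DCHUBBUB_TEST_DEBUG_INDEX selects which internal word DCHUBBUB_TEST_DEBUG_DATA returns, 0x6 being the selector this patch captures. A generalized sketch, assuming the file's usual TO_DCN20_HUBBUB()/REG() helpers:

static uint32_t hubbub2_read_debug_bus(struct hubbub *hubbub, uint32_t index)
{
	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);

	/* Select the debug word, then read it back. */
	REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, index);
	return REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
}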
static const struct hubbub_funcs hubbub2_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 422fbf79da64..f49c1c0d6274 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -313,6 +313,10 @@ void dcn20_init_blank(
}
opp = dc->res_pool->opps[opp_id_src0];
+ /* don't override the blank pattern if already enabled with the correct one. */
+ if (opp->funcs->dpg_is_blanked && opp->funcs->dpg_is_blanked(opp))
+ return;
+
if (num_opps == 2) {
otg_active_width = otg_active_width / 2;
@@ -2126,7 +2130,7 @@ void dcn20_optimize_bandwidth(
dc->clk_mgr,
context,
true);
- if (dc_extended_blank_supported(dc) && context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
+ if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -2134,7 +2138,7 @@ void dcn20_optimize_bandwidth(
&& pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
&& pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
- pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
+ pipe_ctx->dlg_regs.min_dst_y_next_start);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index 58e459c7e7d3..f976fac8dc3f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -667,7 +667,6 @@ static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip
static void dmcub_PLAT_54186_wa(struct hubp *hubp,
struct surface_flip_registers *flip_regs)
{
- struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv;
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
union dmub_rb_cmd cmd;
@@ -690,11 +689,7 @@ static void dmcub_PLAT_54186_wa(struct hubp *hubp,
cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
PERF_TRACE(); // TODO: remove after performance is stable.
- dc_dmub_srv_cmd_queue(dmcub, &cmd);
- PERF_TRACE(); // TODO: remove after performance is stable.
- dc_dmub_srv_cmd_execute(dmcub);
- PERF_TRACE(); // TODO: remove after performance is stable.
- dc_dmub_srv_wait_idle(dmcub);
+ dm_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
PERF_TRACE(); // TODO: remove after performance is stable.
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index 2a182c2f57d6..43463d08f21b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -152,13 +152,28 @@ static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t optio
cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
+static void dmub_abm_set_backlight(struct dc_context *dc, uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp, uint32_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
+ cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
+ cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16;
+ cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
+ cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst);
+ cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
+
+ dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
{
struct abm *abm = pipe_ctx->stream_res.abm;
@@ -173,8 +188,12 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
}
if (abm && panel_cntl) {
- dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE,
- panel_cntl->inst);
+ if (abm->funcs && abm->funcs->set_pipe_ex) {
+ abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE,
+ panel_cntl->inst);
+ } else {
+ dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_IMMEDIATELY_DISABLE, panel_cntl->inst);
+ }
panel_cntl->funcs->store_backlight_level(panel_cntl);
}
}
@@ -191,18 +210,21 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
return;
}
- if (abm && panel_cntl)
- dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ if (abm && panel_cntl) {
+ if (abm->funcs && abm->funcs->set_pipe_ex) {
+ abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ } else {
+ dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ }
+ }
}
bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp)
{
- union dmub_rb_cmd cmd;
struct dc_context *dc = pipe_ctx->stream->ctx;
struct abm *abm = pipe_ctx->stream_res.abm;
- uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
if (dc->dc->res_pool->dmcu) {
@@ -210,21 +232,23 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
return true;
}
- if (abm && panel_cntl)
- dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ if (abm != NULL) {
+ uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
- memset(&cmd, 0, sizeof(cmd));
- cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
- cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
- cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
- cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16;
- cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_CONTROL_VERSION_1;
- cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_cntl->inst);
- cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
+ if (abm && panel_cntl) {
+ if (abm->funcs && abm->funcs->set_pipe_ex) {
+ abm->funcs->set_pipe_ex(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ } else {
+ dmub_abm_set_pipe(abm, otg_inst, SET_ABM_PIPE_NORMAL, panel_cntl->inst);
+ }
+ }
+ }
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->dmub_srv);
- dc_dmub_srv_wait_idle(dc->dmub_srv);
+ if (abm && abm->funcs && abm->funcs->set_backlight_level_pwm)
+ abm->funcs->set_backlight_level_pwm(abm, backlight_pwm_u16_16,
+ frame_ramp, 0, panel_cntl->inst);
+ else
+ dmub_abm_set_backlight(dc, backlight_pwm_u16_16, frame_ramp, panel_cntl->inst);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index 9d08127d209b..005dbe099a7a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -436,6 +436,21 @@ void enc3_stream_encoder_update_dp_info_packets(
&info_frame->vsc,
true);
}
+ /* TODO: VSC SDP at packetIndex 1 should be restricted only if PSR-SU is on.
+ * There should be another infopacket type (e.g. vsc_psrsu) for PSR-SU.
+ * In addition, the driver currently checks the valid bit, then updates and
+ * sends the corresponding infopacket. For PSR-SU, the SDP should only be
+ * sent while entering PSR-SU mode, so we need another parameter (e.g. send)
+ * in dc_info_packet to indicate which infopacket should be enabled by
+ * default here.
+ */
+ if (info_frame->vsc.valid) {
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 1, /* packetIndex */
+ &info_frame->vsc,
+ true);
+ }
/* TODO: VSC SDP at packetIndex 1 should be restricted only if PSR-SU on.
* There should be another infopacket type (e.g. vsc_psrsu) for PSR-SU.
* In addition, currently the driver checks the valid bit then updates and
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 8263a07f265f..3303c9aae068 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -632,7 +632,7 @@ void dcn30_init_hw(struct dc *dc)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
// Get DMCUB capabilities
- dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
+ dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
}
@@ -736,8 +736,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ;
cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header);
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
return true;
}
@@ -859,9 +858,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.mall.cursor_height = cursor_attr.height;
cmd.mall.cursor_pitch = cursor_attr.pitch;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
/* Use copied cursor, and it's okay to not switch back */
cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part;
@@ -877,8 +874,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.mall.tmr_scale = tmr_scale;
cmd.mall.debug_bits = dc->debug.mall_error_as_fatal;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
return true;
}
@@ -895,9 +891,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.mall.header.payload_bytes =
sizeof(cmd.mall) - sizeof(cmd.mall.header);
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index c95f000b63b2..34b08d90dc1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -301,7 +301,12 @@ static void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *o
void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
{
- optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max);
+ struct dc *dc = optc->ctx->dc;
+
+ if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams)
+ dc_dmub_srv_drr_update_cmd(dc, optc->inst, vtotal_min, vtotal_max);
+ else
+ optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max);
}
void optc3_tg_init(struct timing_generator *optc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index 745a5d187a98..bd62502380d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -117,7 +117,6 @@ static bool query_dp_alt_from_dmub(struct link_encoder *enc,
union dmub_rb_cmd *cmd)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
memset(cmd, 0, sizeof(*cmd));
cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS;
@@ -126,7 +125,7 @@ static bool query_dp_alt_from_dmub(struct link_encoder *enc,
cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data);
cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
- if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd))
+ if (!dm_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
return false;
return true;
@@ -425,7 +424,6 @@ static bool link_dpia_control(struct dc_context *dc_ctx,
struct dmub_cmd_dig_dpia_control_data *dpia_control)
{
union dmub_rb_cmd cmd;
- struct dc_dmub_srv *dmub = dc_ctx->dmub_srv;
memset(&cmd, 0, sizeof(cmd));
@@ -438,9 +436,7 @@ static bool link_dpia_control(struct dc_context *dc_ctx,
cmd.dig1_dpia_control.dpia_control = *dpia_control;
- dc_dmub_srv_cmd_queue(dmub, &cmd);
- dc_dmub_srv_cmd_execute(dmub);
- dc_dmub_srv_wait_idle(dmub);
+ dm_execute_dmub_cmd(dc_ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 7e7cd5b64e6a..7445ed27852a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -103,6 +103,7 @@ static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigne
default:
break;
}
+ DC_LOG_DEBUG("Set DET%d to %d segments\n", hubp_inst, det_size_segments);
/* Should never be hit, if it is we have an erroneous hw config*/
ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
+ hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 62ce36c75c4d..55494730e500 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -197,10 +197,6 @@ void dcn31_init_hw(struct dc *dc)
}
}
- /* Enables outbox notifications for usb4 dpia */
- if (dc->res_pool->usb4_dpia_count)
- dmub_enable_outbox_notification(dc->ctx->dmub_srv);
-
/* we want to turn off all dp displays before doing detection */
dc->link_srv->blank_all_dp_displays(dc);
@@ -297,8 +293,9 @@ void dcn31_init_hw(struct dc *dc)
#endif
// Get DMCUB capabilities
- dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
+ dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
+ dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
}
void dcn31_dsc_pg_control(
@@ -442,9 +439,7 @@ void dcn31_z10_save_init(struct dc *dc)
cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
void dcn31_z10_restore(const struct dc *dc)
@@ -462,9 +457,7 @@ void dcn31_z10_restore(const struct dc *dc)
cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT;
cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_RESTORE;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index 3a32810bbe38..8598ea233ef3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -58,6 +58,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
+ .disable_pixel_data = dcn20_disable_pixel_data,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn10_lock_all_pipes,
.cursor_lock = dcn10_cursor_lock,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 11ea9d13e312..217acd4e292a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -52,7 +52,7 @@ static bool dcn31_query_backlight_info(struct panel_cntl *panel_cntl, union dmub
cmd->panel_cntl.header.payload_bytes = sizeof(cmd->panel_cntl.data);
cmd->panel_cntl.data.inst = dcn31_panel_cntl->base.inst;
- return dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd);
+ return dm_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
}
static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl)
@@ -85,7 +85,7 @@ static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
cmd.panel_cntl.data.bl_pwm_ref_div2 =
panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2;
- if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd))
+ if (!dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
return 0;
panel_cntl->stored_backlight_registers.BL_PWM_CNTL = cmd.panel_cntl.data.bl_pwm_cntl;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index 40c488b26901..6fb3f64e3057 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -417,9 +417,7 @@ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool
cmd.domain_control.data.inst = hubp_inst;
cmd.domain_control.data.power_gate = !power_on;
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(ctx->dmub_srv);
- dc_dmub_srv_wait_idle(ctx->dmub_srv);
+ dm_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
PERF_TRACE();
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index 5267e901a35c..ce53339b2e10 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -60,6 +60,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
+ .disable_pixel_data = dcn20_disable_pixel_data,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn10_lock_all_pipes,
.cursor_lock = dcn10_cursor_lock,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index 41c972c8eb19..42a0157fd813 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -136,6 +136,9 @@
#define DCN3_15_MAX_DET_SIZE 384
#define DCN3_15_CRB_SEGMENT_SIZE_KB 64
+#define DCN3_15_MAX_DET_SEGS (DCN3_15_MAX_DET_SIZE / DCN3_15_CRB_SEGMENT_SIZE_KB)
+/* At least 2 extra segments must remain in the compbuf and be claimable to guarantee seamless MPO transitions */
+#define MIN_RESERVED_DET_SEGS 2
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
@@ -1636,21 +1639,57 @@ static bool is_dual_plane(enum surface_pixel_format format)
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}
+static int source_format_to_bpp (enum source_format_class SourcePixelFormat)
+{
+ if (SourcePixelFormat == dm_444_64)
+ return 8;
+ else if (SourcePixelFormat == dm_444_16)
+ return 2;
+ else if (SourcePixelFormat == dm_444_8)
+ return 1;
+ else if (SourcePixelFormat == dm_rgbe_alpha)
+ return 5;
+ else if (SourcePixelFormat == dm_420_8)
+ return 3;
+ else if (SourcePixelFormat == dm_420_12)
+ return 6;
+ else
+ return 4;
+}
+
+static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ struct resource_context *res_ctx = &context->res_ctx;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ /* Don't apply if MPO is in use, to avoid transition issues */
+ if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state != res_ctx->pipe_ctx[i].plane_state)
+ return false;
+ }
+ return true;
+}
+
static int dcn315_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
{
- int i, pipe_cnt;
+ int i, pipe_cnt, crb_idx, crb_pipes;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB;
+ int remaining_det_segs = max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB;
+ bool pixel_rate_crb = allow_pixel_rate_crb(dc, context);
DC_FP_START();
dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
DC_FP_END();
- for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
if (!res_ctx->pipe_ctx[i].stream)
@@ -1671,6 +1710,23 @@ static int dcn315_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
+ if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) {
+ int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format);
+ /* Ceil to crb segment size */
+ int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate(
+ &context->bw_ctx.dml.soc, timing->pix_clk_100hz, bpp, DCN3_15_CRB_SEGMENT_SIZE_KB);
+ if (approx_det_segs_required_for_pstate <= 2 * DCN3_15_MAX_DET_SEGS) {
+ bool split_required = approx_det_segs_required_for_pstate > DCN3_15_MAX_DET_SEGS;
+ split_required = split_required || timing->pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc);
+ split_required = split_required || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
+ if (split_required)
+ approx_det_segs_required_for_pstate += approx_det_segs_required_for_pstate % 2;
+ pipes[pipe_cnt].pipe.src.det_size_override = approx_det_segs_required_for_pstate;
+ remaining_det_segs -= approx_det_segs_required_for_pstate;
+ } else
+ remaining_det_segs = -1;
+ crb_pipes++;
+ }
DC_FP_END();
if (pipes[pipe_cnt].dout.dsc_enable) {
@@ -1689,16 +1745,49 @@ static int dcn315_populate_dml_pipes_from_context(
break;
}
}
-
pipe_cnt++;
}
+ /* Spread the remaining unreserved CRB evenly among all pipes; fall back to the default policy if there is not enough DET or only a single pipe */
+ if (pixel_rate_crb) {
+ for (i = 0, pipe_cnt = 0, crb_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &res_ctx->pipe_ctx[i];
+ if (!pipe->stream)
+ continue;
+
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe) {
+ bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc)
+ || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120);
+
+ if (remaining_det_segs < 0 || crb_pipes == 1)
+ pipes[pipe_cnt].pipe.src.det_size_override = 0;
+ if (remaining_det_segs > MIN_RESERVED_DET_SEGS)
+ pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes +
+ (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0);
+ if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) {
+ /* Clamp to 2 pipe split max det segments */
+ remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS);
+ pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS;
+ }
+ if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) {
+ /* If we are splitting we must have an even number of segments */
+ remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2;
+ pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2;
+ }
+ /* Convert segments into size for DML use */
+ pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB;
+ crb_idx++;
+ }
+ pipe_cnt++;
+ }
+ }
+
if (pipe_cnt)
context->bw_ctx.dml.ip.det_buffer_size_kbytes =
(max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / pipe_cnt) * DCN3_15_CRB_SEGMENT_SIZE_KB;
if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_15_MAX_DET_SIZE)
context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_15_MAX_DET_SIZE;
- ASSERT(context->bw_ctx.dml.ip.det_buffer_size_kbytes >= DCN3_15_DEFAULT_DET_SIZE);
+
dc->config.enable_4to1MPC = false;
if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
if (is_dual_plane(pipe->plane_state->format)
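The spreading step above hands each CRB-managed pipe floor(extra / crb_pipes) segments and gives the remainder out one segment at a time to the lowest-indexed pipes. A self-contained sketch with illustrative numbers, assuming a 1792 KB CRB and 128 KB minimum compbuf (26 segments of 64 KB) as on DCN3.15-class parts:

#include <stdio.h>

int main(void)
{
	/* 26 total segments; two pipes already reserved 4 each for p-state */
	int remaining_det_segs = 26 - 8;
	int min_reserved = 2;		/* MIN_RESERVED_DET_SEGS */
	int crb_pipes = 2, crb_idx;
	int extra = remaining_det_segs - min_reserved;	/* 16 to hand out */
	int det_segs[2] = { 4, 4 };

	for (crb_idx = 0; crb_idx < crb_pipes; crb_idx++) {
		det_segs[crb_idx] += extra / crb_pipes +
				(crb_idx < extra % crb_pipes ? 1 : 0);
		/* each pipe ends at 12 segs = 768 KB = 2 * MAX_DET_SEGS,
		 * only reachable by splitting the pipe across two DPPs */
		printf("pipe %d: %d segs = %d KB\n",
		       crb_idx, det_segs[crb_idx], det_segs[crb_idx] * 64);
	}
	return 0;
}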
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index 1f5ee5cde6e1..4950eaa4406b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -274,8 +274,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ;
cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
return true;
}
@@ -309,8 +308,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header);
cmd.cab.cab_alloc_ways = ways;
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
return true;
}
@@ -326,9 +324,7 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
cmd.cab.header.payload_bytes =
sizeof(cmd.cab) - sizeof(cmd.cab.header);
- dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
- dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
- dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -721,6 +717,9 @@ static void dcn32_initialize_min_clocks(struct dc *dc)
clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000;
clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000;
+ clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
+ clocks->fclk_p_state_change_support = true;
+ clocks->p_state_change_support = true;
if (dc->debug.disable_boot_optimizations) {
clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
} else {
@@ -730,9 +729,6 @@ static void dcn32_initialize_min_clocks(struct dc *dc)
* freq to ensure that the timing is valid and unchanged.
*/
clocks->dispclk_khz = dc->clk_mgr->funcs->get_dispclk_from_dentist(dc->clk_mgr);
- clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
- clocks->fclk_p_state_change_support = true;
- clocks->p_state_change_support = true;
}
dc->clk_mgr->funcs->update_clocks(
@@ -946,8 +942,10 @@ void dcn32_init_hw(struct dc *dc)
// Get DMCUB capabilities
if (dc->ctx->dmub_srv) {
- dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
+ dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv);
dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
+ dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support;
+ dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index 2ee798965bc2..6ef56fb32131 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -245,16 +245,9 @@ static void optc32_set_drr(
}
optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1);
- optc32_setup_manual_trigger(optc);
- } else {
- REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
- OTG_SET_V_TOTAL_MIN_MASK, 0,
- OTG_V_TOTAL_MIN_SEL, 0,
- OTG_V_TOTAL_MAX_SEL, 0,
- OTG_FORCE_LOCK_ON_EVENT, 0);
-
- optc->funcs->set_vtotal_min_max(optc, 0, 0);
}
+
+ optc32_setup_manual_trigger(optc);
}
static struct timing_generator_funcs dcn32_tg_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 22dd1ebea618..4de2f8813dce 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -726,8 +726,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.override_dispclk_programming = true,
.disable_fpo_optimizations = false,
.fpo_vactive_margin_us = 2000, // 2000us
- .disable_fpo_vactive = true,
+ .disable_fpo_vactive = false,
.disable_boot_optimizations = false,
+ .disable_subvp_high_refresh = true,
+ .disable_dp_plus_plus_wa = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1888,6 +1890,8 @@ bool dcn32_validate_bandwidth(struct dc *dc,
dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+ dcn32_override_min_req_memclk(dc, context);
+
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 3937dbc1e552..04be01ae1ecf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -40,6 +40,7 @@
#define DCN3_2_VMIN_DISPCLK_HZ 717000000
#define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10Mhz for init DCFCLK DS freq
#define DCN3_2_MIN_ACTIVE_SWITCH_MARGIN_FPO_US 100 // Only allow FPO + Vactive if active margin >= 100
+#define SUBVP_HIGH_REFRESH_LIST_LEN 3
#define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base)
@@ -47,6 +48,15 @@
extern struct _vcs_dpi_ip_params_st dcn3_2_ip;
extern struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc;
+struct subvp_high_refresh_list {
+ int min_refresh;
+ int max_refresh;
+ struct resolution {
+ int width;
+ int height;
+ } res[SUBVP_HIGH_REFRESH_LIST_LEN];
+};
+
struct dcn32_resource_pool {
struct resource_pool base;
};
@@ -151,10 +161,14 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre
bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe);
+bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe);
+
unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans);
double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context);
+bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int width, unsigned int height);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index eeca16faf31a..df912c333bbd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -656,3 +656,18 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre
return fpo_candidate_stream;
}
+
+bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int width, unsigned int height)
+{
+ bool is_native_scaling = false;
+
+ if (pipe->stream->timing.h_addressable == width &&
+ pipe->stream->timing.v_addressable == height &&
+ pipe->plane_state->src_rect.width == width &&
+ pipe->plane_state->src_rect.height == height &&
+ pipe->plane_state->dst_rect.width == width &&
+ pipe->plane_state->dst_rect.height == height)
+ is_native_scaling = true;
+
+ return is_native_scaling;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index a60ddb343d13..4c1e0f5a5f09 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -725,8 +725,9 @@ static const struct dc_debug_options debug_defaults_drv = {
.override_dispclk_programming = true,
.disable_fpo_optimizations = false,
.fpo_vactive_margin_us = 2000, // 2000us
- .disable_fpo_vactive = true,
+ .disable_fpo_vactive = false,
.disable_boot_optimizations = false,
+ .disable_subvp_high_refresh = true,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1735,9 +1736,9 @@ static bool dcn321_resource_construct(
dc->caps.subvp_pstate_allow_width_us = 20;
dc->caps.subvp_vertical_int_margin_us = 30;
dc->caps.subvp_drr_vblank_start_margin_us = 100; // 100us margin
- dc->caps.max_slave_planes = 1;
- dc->caps.max_slave_yuv_planes = 1;
- dc->caps.max_slave_rgb_planes = 1;
+ dc->caps.max_slave_planes = 2;
+ dc->caps.max_slave_yuv_planes = 2;
+ dc->caps.max_slave_rgb_planes = 2;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.dp_hpo = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 9a3f2a44f882..d33d595405a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -40,6 +40,7 @@
struct dmub_srv;
struct dc_dmub_srv;
+union dmub_rb_cmd;
irq_handler_idx dm_register_interrupt(
struct dc_context *ctx,
@@ -274,6 +275,12 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
#define PERF_TRACE_CTX(__CTX) dm_perf_trace_timestamp(__func__, __LINE__, __CTX)
/*
+ * DMUB Interfaces
+ */
+bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
+bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
+
+/*
* Debug and verification hooks
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index b52ba6ffabe1..facf269c4326 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -269,4 +269,10 @@ struct dtn_min_clk_info {
uint32_t min_memory_clock_khz;
};
+enum dm_dmub_wait_type {
+ DM_DMUB_WAIT_TYPE_NO_WAIT,
+ DM_DMUB_WAIT_TYPE_WAIT,
+ DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY,
+};
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index f1c1a4b5fcac..3407f9a2c6a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -948,10 +948,10 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
{
int plane_count;
int i;
- unsigned int optimized_min_dst_y_next_start_us;
+ unsigned int min_dst_y_next_start_us;
plane_count = 0;
- optimized_min_dst_y_next_start_us = 0;
+ min_dst_y_next_start_us = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
@@ -973,19 +973,18 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
+ struct dc_stream_state *current_stream = context->streams[0];
int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
bool is_pwrseq0 = link->link_index == 0;
+ bool isFreesyncVideo;
- if (dc_extended_blank_supported(dc)) {
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].stream == context->streams[0]
- && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min == context->res_ctx.pipe_ctx[i].stream->adjust.v_total_max
- && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min > context->res_ctx.pipe_ctx[i].stream->timing.v_total) {
- optimized_min_dst_y_next_start_us =
- context->res_ctx.pipe_ctx[i].dlg_regs.optimized_min_dst_y_next_start_us;
- break;
- }
+ isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
+ isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
+ min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
+ break;
}
}
@@ -993,7 +992,7 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
if (stream_status->plane_count > 1)
return DCN_ZSTATE_SUPPORT_DISALLOW;
- if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000))
+ if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
return DCN_ZSTATE_SUPPORT_ALLOW;
else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
@@ -1043,7 +1042,7 @@ void dcn20_calculate_dlg_params(struct dc *dc,
int pipe_cnt,
int vlevel)
{
- int i, pipe_idx;
+ int i, pipe_idx, active_hubp_count = 0;
dc_assert_fp_enabled();
@@ -1079,6 +1078,8 @@ void dcn20_calculate_dlg_params(struct dc *dc,
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ active_hubp_count++;
pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
@@ -1105,6 +1106,10 @@ void dcn20_calculate_dlg_params(struct dc *dc,
pipe_idx++;
}
+ /* If DCN isn't making memory requests, we can allow p-state change */
+ if (!active_hubp_count) {
+ context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ }
/*save a original dppclock copy*/
context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
@@ -1885,6 +1890,17 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
}
+ if ((int)(bb->sr_exit_z8_time_us * 1000)
+ != dc->bb_overrides.sr_exit_z8_time_ns
+ && dc->bb_overrides.sr_exit_z8_time_ns) {
+ bb->sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0;
+ }
+
+ if ((int)(bb->sr_enter_plus_exit_z8_time_us * 1000)
+ != dc->bb_overrides.sr_enter_plus_exit_z8_time_ns
+ && dc->bb_overrides.sr_enter_plus_exit_z8_time_ns) {
+ bb->sr_enter_plus_exit_z8_time_us = dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0;
+ }
if ((int)(bb->urgent_latency_us * 1000) != dc->bb_overrides.urgent_latency_ns
&& dc->bb_overrides.urgent_latency_ns) {
bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index b7c2844d0cbe..f294f2f8c75b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -810,7 +810,7 @@ static bool CalculatePrefetchSchedule(
*swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockWidth256BytesC) + myPipe->BlockWidth256BytesC;
} else {
*swath_width_luma_ub = dml_ceil(SwathWidthY - 1, myPipe->BlockHeight256BytesY) + myPipe->BlockHeight256BytesY;
- if (myPipe->BlockWidth256BytesC > 0)
+ if (myPipe->BlockHeight256BytesC > 0)
*swath_width_chroma_ub = dml_ceil(SwathWidthY / 2 - 1, myPipe->BlockHeight256BytesC) + myPipe->BlockHeight256BytesC;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 7d0626e42ea6..9af1a43c042b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -4939,8 +4939,8 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
v->TotImmediateFlipBytes = 0.0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
- v->TotImmediateFlipBytes = v->TotImmediateFlipBytes + v->NoOfDPP[i][j][k] * v->PDEAndMetaPTEBytesPerFrame[i][j][k]
- + v->MetaRowBytes[i][j][k] + v->DPTEBytesPerRow[i][j][k];
+ v->TotImmediateFlipBytes = v->TotImmediateFlipBytes + v->NoOfDPP[i][j][k] * (v->PDEAndMetaPTEBytesPerFrame[i][j][k]
+ + v->MetaRowBytes[i][j][k] + v->DPTEBytesPerRow[i][j][k]);
}
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
@@ -5130,7 +5130,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
ViewportExceedsSurface = true;
if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16
- && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
+ && v->SourcePixelFormat[k] != dm_mono_16 && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k] || v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) {
ViewportExceedsSurface = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index cd3cfcb2a2b0..0497a5d74a62 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -980,7 +980,7 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
unsigned int vstartup_start = 0;
unsigned int dst_x_after_scaler = 0;
- unsigned int dst_y_after_scaler = 0;
+ int dst_y_after_scaler = 0;
double line_wait = 0;
double dst_y_prefetch = 0;
double dst_y_per_vm_vblank = 0;
@@ -1171,6 +1171,8 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ if (dst_y_after_scaler < 0)
+ dst_y_after_scaler = 0;
// do some adjustment on the dst_after scaler to account for odm combine mode
dml_print("DML_DLG: %s: input dst_x_after_scaler = %d\n",
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index 59836570603a..19d034341e64 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -483,7 +483,7 @@ void dcn31_calculate_wm_and_dlg_fp(
int pipe_cnt,
int vlevel)
{
- int i, pipe_idx, active_hubp_count = 0;
+ int i, pipe_idx, total_det = 0, active_hubp_count = 0;
double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
dc_assert_fp_enabled();
@@ -563,6 +563,18 @@ void dcn31_calculate_wm_and_dlg_fp(
if (context->res_ctx.pipe_ctx[i].stream)
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
}
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ context->res_ctx.pipe_ctx[i].det_buffer_size_kb =
+ get_det_buffer_size_kbytes(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ if (context->res_ctx.pipe_ctx[i].det_buffer_size_kb > 384)
+ context->res_ctx.pipe_ctx[i].det_buffer_size_kb /= 2;
+ total_det += context->res_ctx.pipe_ctx[i].det_buffer_size_kb;
+ pipe_idx++;
+ }
+ context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - total_det;
}
void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
@@ -815,3 +827,14 @@ int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc)
{
return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0);
}
+
+int dcn_get_approx_det_segs_required_for_pstate(
+ struct _vcs_dpi_soc_bounding_box_st *soc,
+ int pix_clk_100hz, int bpp, int seg_size_kb)
+{
+ /* Roughly calculate the CRB required to hide latency. In practice there is
+ * slightly more buffer available for latency hiding
+ */
+ return (int)(soc->dram_clock_change_latency_us * pix_clk_100hz * bpp
+ / 10240000 + seg_size_kb - 1) / seg_size_kb;
+}
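Worked example of the formula above: latency_us * pix_clk_100hz * bpp / 10240000 is the buffer in KB needed to cover the DRAM clock change latency (pix_clk_100hz * 100 pixels/s times bpp bytes, converted to KB), and the + seg_size_kb - 1 makes the final division round up. The latency value below is an assumed DCN3.15-class figure, for illustration only:

#include <stdio.h>

int main(void)
{
	double latency_us = 11.72;	/* assumed dram_clock_change_latency_us */
	int pix_clk_100hz = 5940000;	/* 594 MHz (4k60 HDMI timing) */
	int bpp = 4;			/* 32bpp surface */
	int seg_size_kb = 64;
	int segs = (int)(latency_us * pix_clk_100hz * bpp
			/ 10240000 + seg_size_kb - 1) / seg_size_kb;

	printf("%d segment(s)\n", segs);	/* ~27.2 KB -> 1 segment */
	return 0;
}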
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
index 687d3522cc33..8f9c8faed260 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -47,6 +47,9 @@ void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc);
+int dcn_get_approx_det_segs_required_for_pstate(
+ struct _vcs_dpi_soc_bounding_box_st *soc,
+ int pix_clk_100hz, int bpp, int seg_size_kb);
int dcn31x_populate_dml_pipes_from_context(struct dc *dc,
struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index bd674dc30df3..01603abd75bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -532,7 +532,8 @@ static void CalculateStutterEfficiency(
static void CalculateSwathAndDETConfiguration(
bool ForceSingleDPP,
int NumberOfActivePlanes,
- unsigned int DETBufferSizeInKByte,
+ bool DETSharedByAllDPP,
+ unsigned int DETBufferSizeInKByte[],
double MaximumSwathWidthLuma[],
double MaximumSwathWidthChroma[],
enum scan_direction_class SourceScan[],
@@ -3118,7 +3119,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->SurfaceWidthC[k],
v->SurfaceHeightY[k],
v->SurfaceHeightC[k],
- v->DETBufferSizeInKByte[0] * 1024,
+ v->DETBufferSizeInKByte[k] * 1024,
v->BlockHeight256BytesY[k],
v->BlockHeight256BytesC[k],
v->SurfaceTiling[k],
@@ -3313,7 +3314,8 @@ static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
- v->DETBufferSizeInKByte[0],
+ mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
+ v->DETBufferSizeInKByte,
dummy1,
dummy2,
v->SourceScan,
@@ -3779,14 +3781,16 @@ static noinline void CalculatePrefetchSchedulePerPlane(
&v->VReadyOffsetPix[k]);
}
-static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int *DETBufferSizeInKByte)
+static void PatchDETBufferSizeInKByte(unsigned int NumberOfActivePlanes, int NoOfDPPThisState[], unsigned int config_return_buffer_size_in_kbytes, unsigned int DETBufferSizeInKByte[])
{
int i, total_pipes = 0;
for (i = 0; i < NumberOfActivePlanes; i++)
total_pipes += NoOfDPPThisState[i];
- *DETBufferSizeInKByte = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
- if (*DETBufferSizeInKByte > DCN3_15_MAX_DET_SIZE)
- *DETBufferSizeInKByte = DCN3_15_MAX_DET_SIZE;
+ DETBufferSizeInKByte[0] = ((config_return_buffer_size_in_kbytes - DCN3_15_MIN_COMPBUF_SIZE_KB) / 64 / total_pipes) * 64;
+ if (DETBufferSizeInKByte[0] > DCN3_15_MAX_DET_SIZE)
+ DETBufferSizeInKByte[0] = DCN3_15_MAX_DET_SIZE;
+ for (i = 1; i < NumberOfActivePlanes; i++)
+ DETBufferSizeInKByte[i] = DETBufferSizeInKByte[0];
}
@@ -4026,7 +4030,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
CalculateSwathAndDETConfiguration(
true,
v->NumberOfActivePlanes,
- v->DETBufferSizeInKByte[0],
+ mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
+ v->DETBufferSizeInKByte,
v->MaximumSwathWidthLuma,
v->MaximumSwathWidthChroma,
v->SourceScan,
@@ -4166,6 +4171,10 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
|| (v->PlaneRequiredDISPCLK > v->MaxDispclkRoundedDownToDFSGranularity)) {
v->DISPCLK_DPPCLK_Support[i][j] = false;
}
+ if (mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[k] > DCN3_15_MAX_DET_SIZE && v->NoOfDPP[i][j][k] < 2) {
+ v->MPCCombine[i][j][k] = true;
+ v->NoOfDPP[i][j][k] = 2;
+ }
}
v->TotalNumberOfActiveDPP[i][j] = 0;
v->TotalNumberOfSingleDPPPlanes[i][j] = 0;
@@ -4642,12 +4651,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->ODMCombineEnableThisState[k] = v->ODMCombineEnablePerState[i][k];
}
- if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315)
- PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, &v->DETBufferSizeInKByte[0]);
+ if (v->NumberOfActivePlanes > 1 && mode_lib->project == DML_PROJECT_DCN315 && !v->DETSizeOverride[0])
+ PatchDETBufferSizeInKByte(v->NumberOfActivePlanes, v->NoOfDPPThisState, v->ip.config_return_buffer_size_in_kbytes, v->DETBufferSizeInKByte);
CalculateSwathAndDETConfiguration(
false,
v->NumberOfActivePlanes,
- v->DETBufferSizeInKByte[0],
+ mode_lib->project == DML_PROJECT_DCN315 && v->DETSizeOverride[0],
+ v->DETBufferSizeInKByte,
v->MaximumSwathWidthLuma,
v->MaximumSwathWidthChroma,
v->SourceScan,
@@ -5274,8 +5284,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->TotImmediateFlipBytes = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->TotImmediateFlipBytes = v->TotImmediateFlipBytes
- + v->NoOfDPP[i][j][k] * v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k]
- + v->DPTEBytesPerRow[i][j][k];
+ + v->NoOfDPP[i][j][k] * (v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k]
+ + v->DPTEBytesPerRow[i][j][k]);
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -6611,7 +6621,8 @@ static void CalculateStutterEfficiency(
static void CalculateSwathAndDETConfiguration(
bool ForceSingleDPP,
int NumberOfActivePlanes,
- unsigned int DETBufferSizeInKByte,
+ bool DETSharedByAllDPP,
+ unsigned int DETBufferSizeInKByteA[],
double MaximumSwathWidthLuma[],
double MaximumSwathWidthChroma[],
enum scan_direction_class SourceScan[],
@@ -6695,6 +6706,10 @@ static void CalculateSwathAndDETConfiguration(
*ViewportSizeSupport = true;
for (k = 0; k < NumberOfActivePlanes; ++k) {
+ unsigned int DETBufferSizeInKByte = DETBufferSizeInKByteA[k];
+
+ if (DETSharedByAllDPP && DPPPerPlane[k])
+ DETBufferSizeInKByte /= DPPPerPlane[k];
if ((SourcePixelFormat[k] == dm_444_64 || SourcePixelFormat[k] == dm_444_32 || SourcePixelFormat[k] == dm_444_16 || SourcePixelFormat[k] == dm_mono_16
|| SourcePixelFormat[k] == dm_mono_8 || SourcePixelFormat[k] == dm_rgbe)) {
if (SurfaceTiling[k] == dm_sw_linear
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index 2244e4fb8c96..fcde8f21b8be 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -987,8 +987,7 @@ static void dml_rq_dlg_get_dlg_params(
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
- disp_dlg_regs->optimized_min_dst_y_next_start_us = 0;
- disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
+ disp_dlg_regs->min_dst_y_next_start_us = 0;
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 9e54e3d0eb78..1d00eb9e73c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -286,6 +286,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
bool upscaled = false;
+ bool isFreesyncVideo = false;
dc_assert_fp_enabled();
@@ -299,9 +300,16 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;
- if (dc_extended_blank_supported(dc) && pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min
- && pipe->stream->adjust.v_total_min > timing->v_total)
+ isFreesyncVideo = pipe->stream->adjust.v_total_max == pipe->stream->adjust.v_total_min;
+ isFreesyncVideo = isFreesyncVideo && pipe->stream->adjust.v_total_min > timing->v_total;
+
+ if (!isFreesyncVideo) {
+ pipes[pipe_cnt].pipe.dest.vblank_nom =
+ dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0));
+ } else {
pipes[pipe_cnt].pipe.dest.vtotal = pipe->stream->adjust.v_total_min;
+ pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
+ }
if (pipe->plane_state &&
(pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height ||
@@ -323,8 +331,6 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
- pipes[pipe_cnt].pipe.dest.vblank_nom =
- dcn3_14_ip.VBlankNomDefaultUS / (timing->h_total / (timing->pix_clk_100hz / 10000.0));
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
pipes[pipe_cnt].dout.dsc_input_bpc = 0;
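In the non-freesync branch above, h_total / (pix_clk_100hz / 10000.0) is the line time in microseconds, so dividing VBlankNomDefaultUS by it converts the nominal vblank time into a line count. A standalone check, assuming a 668 us default (illustrative) on a 1080p60 CEA timing:

#include <stdio.h>

int main(void)
{
	double vblank_nom_default_us = 668.0;	/* assumed dcn3_14 default */
	double h_total = 2200.0;		/* 1080p60 CEA-861 timing */
	double pix_clk_mhz = 148.5;		/* pix_clk_100hz / 10000.0 */
	double line_time_us = h_total / pix_clk_mhz;	/* ~14.81 us */

	printf("vblank_nom = %.0f lines\n",
	       vblank_nom_default_us / line_time_us);	/* ~45 lines */
	return 0;
}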
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 7eb2173b7691..27b83162ae45 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -5371,8 +5371,8 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
v->TotImmediateFlipBytes = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
v->TotImmediateFlipBytes = v->TotImmediateFlipBytes
- + v->NoOfDPP[i][j][k] * v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k]
- + v->DPTEBytesPerRow[i][j][k];
+ + v->NoOfDPP[i][j][k] * (v->PDEAndMetaPTEBytesPerFrame[i][j][k] + v->MetaRowBytes[i][j][k]
+ + v->DPTEBytesPerRow[i][j][k]);
}
for (k = 0; k < v->NumberOfActivePlanes; k++) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
index ea4eb66066c4..61ba3e33bb11 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
@@ -951,7 +951,6 @@ static void dml_rq_dlg_get_dlg_params(
{
const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
- const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout;
const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth;
const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps;
@@ -1000,8 +999,6 @@ static void dml_rq_dlg_get_dlg_params(
unsigned int vupdate_width;
unsigned int vready_offset;
- unsigned int dispclk_delay_subtotal;
-
unsigned int vstartup_start;
unsigned int dst_x_after_scaler;
unsigned int dst_y_after_scaler;
@@ -1051,7 +1048,6 @@ static void dml_rq_dlg_get_dlg_params(
float vba__refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
float vba__refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz; // From VBA
- int blank_lines = 0;
memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
@@ -1075,17 +1071,10 @@ static void dml_rq_dlg_get_dlg_params(
min_ttu_vblank = get_min_ttu_vblank_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // From VBA
dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
- disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
- disp_dlg_regs->optimized_min_dst_y_next_start_us = 0;
- disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
- blank_lines = (dst->vblank_end + dst->vtotal_min - dst->vblank_start - dst->vstartup_start - 1);
- if (blank_lines < 0)
- blank_lines = 0;
- if (blank_lines != 0) {
- disp_dlg_regs->optimized_min_dst_y_next_start = vba__min_dst_y_next_start;
- disp_dlg_regs->optimized_min_dst_y_next_start_us = (disp_dlg_regs->optimized_min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
- disp_dlg_regs->min_dst_y_next_start = disp_dlg_regs->optimized_min_dst_y_next_start;
- }
+ disp_dlg_regs->min_dst_y_next_start_us =
+ (vba__min_dst_y_next_start * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
+ disp_dlg_regs->min_dst_y_next_start = vba__min_dst_y_next_start * dml_pow(2, 2);
+
ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
dml_print("DML_DLG: %s: min_ttu_vblank (us) = %3.2f\n", __func__, min_ttu_vblank);
@@ -1127,13 +1116,6 @@ static void dml_rq_dlg_get_dlg_params(
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
- dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
-
- if (dout->dsc_enable) {
- double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); // FROM VBA
-
- dispclk_delay_subtotal += dsc_delay;
- }
vstartup_start = dst->vstartup_start;
if (interlaced) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 47beb4ea779d..826059d5b367 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -35,6 +35,15 @@
#define DC_LOGGER_INIT(logger)
+static const struct subvp_high_refresh_list subvp_high_refresh_list = {
+ .min_refresh = 120,
+ .max_refresh = 165,
+ .res = {
+ {.width = 3840, .height = 2160, },
+ {.width = 3440, .height = 1440, },
+ {.width = 2560, .height = 1440, }},
+};
+
struct _vcs_dpi_ip_params_st dcn3_2_ip = {
.gpuvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
@@ -692,8 +701,11 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* to combine this with SubVP can cause issues with the scheduling).
* - Not TMZ surface
*/
- if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && !dcn32_is_psr_capable(pipe) &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface &&
+ if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
+ (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
+ pipe->stream->mall_stream_config.type == SUBVP_NONE &&
+ (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
+ !pipe->plane_state->address.tmz_surface &&
(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
dcn32_allow_subvp_with_active_margin(pipe)))) {
@@ -880,10 +892,6 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
int16_t stretched_drr_us = 0;
int16_t drr_stretched_vblank_us = 0;
int16_t max_vblank_mallregion = 0;
- const struct dc_config *config = &dc->config;
-
- if (config->disable_subvp_drr)
- return false;
// Find SubVP pipe
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1129,7 +1137,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
* 4. Display configuration passes validation
* 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
*/
- if (!dc->debug.force_disable_subvp && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
+ if (!dc->debug.force_disable_subvp && !dc->caps.dmub_caps.gecc_enable && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
!dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) &&
(*vlevel == context->bw_ctx.dml.soc.num_states ||
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
@@ -1432,6 +1440,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.clk.dramclk_khz = 0;
context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = true;
}
/*save a original dppclock copy*/
context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
@@ -2062,6 +2071,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
* sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr)
*/
+ /*
if (dcn3_2_soc.num_states > 2) {
vlevel_temp = 0;
dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
@@ -2088,6 +2098,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ */
/* Set C, for Dummy P-State:
* All clocks min.
@@ -2189,6 +2200,9 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.watermarks.a.usr_retraining_ns = get_usr_retraining_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
}
+ /* Make set D = set A since we do not optimize watermarks for MALL */
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
+
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
@@ -2783,6 +2797,55 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
}
/**
+ * ************************************************************************************************
+ * dcn32_allow_subvp_high_refresh_rate: Determine if the high refresh rate config will allow subvp
+ *
+ * @param [in]: dc: Current DC state
+ * @param [in]: context: New DC state to be programmed
+ * @param [in]: pipe: Pipe to be considered for use in subvp
+ *
+ * On high refresh rate display configs, we will allow subvp under the following conditions:
+ * 1. Resolution is 3840x2160, 3440x1440, or 2560x1440
+ * 2. Refresh rate is between 120 Hz and 165 Hz
+ * 3. No scaling
+ * 4. Freesync is not actively varying the refresh rate
+ * 5. For single-display configs, freesync must additionally be disabled entirely
+ *
+ * @return: True if pipe can be used for subvp, false otherwise
+ *
+ * ************************************************************************************************
+ */
+bool dcn32_allow_subvp_high_refresh_rate(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe)
+{
+ bool allow = false;
+ uint32_t refresh_rate = 0;
+ uint32_t min_refresh = subvp_high_refresh_list.min_refresh;
+ uint32_t max_refresh = subvp_high_refresh_list.max_refresh;
+ uint32_t i;
+
+ if (!dc->debug.disable_subvp_high_refresh && pipe->stream &&
+ pipe->plane_state && !pipe->stream->vrr_active_variable) {
+ refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
+ pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
+ / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
+ if (refresh_rate >= min_refresh && refresh_rate <= max_refresh) {
+ for (i = 0; i < SUBVP_HIGH_REFRESH_LIST_LEN; i++) {
+ uint32_t width = subvp_high_refresh_list.res[i].width;
+ uint32_t height = subvp_high_refresh_list.res[i].height;
+
+ if (dcn32_check_native_scaling_for_res(pipe, width, height)) {
+ if ((context->stream_count == 1 && !pipe->stream->allow_freesync) || context->stream_count > 1) {
+ allow = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+ return allow;
+}
+
+/**
* *******************************************************************************************
* dcn32_determine_max_vratio_prefetch: Determine max Vratio for prefetch by driver policy
*
@@ -2882,3 +2945,18 @@ void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb)
dc_assert_fp_enabled();
dcn3_2_soc.clock_limits[0].dcfclk_mhz = 1200.0;
}
+
+void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context)
+{
+ // WA: restrict FPO and SubVP to use first non-strobe mode (DCN32 BW issue)
+ if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dcn32_subvp_in_use(dc, context)) &&
+ dc->dml.soc.num_chans <= 8) {
+ int num_mclk_levels = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
+
+ if (context->bw_ctx.dml.vba.DRAMSpeed <= dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16 &&
+ num_mclk_levels > 1) {
+ context->bw_ctx.dml.vba.DRAMSpeed = dc->clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16;
+ context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
+ }
+ }
+}
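The refresh-rate computation in dcn32_allow_subvp_high_refresh_rate() above is a ceiling division: adding v_total * h_total - 1 before dividing rounds any fractional refresh rate up to the next integer, so a clock even slightly above the nominal rate is not under-counted. A standalone check with an exact 4K120 CTA timing:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 3840x2160@120: 1188 MHz pixel clock, 4400 x 2250 total */
	uint32_t pix_clk_100hz = 11880000;
	uint32_t h_total = 4400, v_total = 2250;
	uint32_t refresh_rate = (pix_clk_100hz * 100 +
			v_total * h_total - 1) / (double)(v_total * h_total);

	printf("%u Hz\n", refresh_rate);	/* exactly 120 */
	return 0;
}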
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
index dcf512cd3072..a4206b71d650 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h
@@ -80,6 +80,8 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co
bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint32_t vactive_margin_req);
+void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context);
+
void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 13c7e7394b1c..66f44a013fe5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -2322,10 +2322,14 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.LinkCapacitySupport[i] = true;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k
- && (mode_lib->vba.Output[k] == dm_dp || mode_lib->vba.Output[k] == dm_dp2p0
- || mode_lib->vba.Output[k] == dm_edp
- || mode_lib->vba.Output[k] == dm_hdmi)
- && mode_lib->vba.OutputBppPerState[i][k] == 0) {
+ && (mode_lib->vba.Output[k] == dm_dp || mode_lib->vba.Output[k] == dm_dp2p0
+ || mode_lib->vba.Output[k] == dm_edp
+ || mode_lib->vba.Output[k] == dm_hdmi)
+ && mode_lib->vba.OutputBppPerState[i][k] == 0 &&
+ (mode_lib->vba.UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe)) {
+ /* Phantom pipes don't consider DSC in DML, so they could fail the link
+ * check. However, we don't care about the link for phantom pipes.
+ */
mode_lib->vba.LinkCapacitySupport[i] = false;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 61cc4904ade4..a50e7f4dce42 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -1595,7 +1595,6 @@ double dml32_TruncToValidBPP(
unsigned int NonDSCBPP0;
unsigned int NonDSCBPP1;
unsigned int NonDSCBPP2;
- unsigned int NonDSCBPP3;
if (Format == dm_420) {
NonDSCBPP0 = 12;
@@ -1604,10 +1603,9 @@ double dml32_TruncToValidBPP(
MinDSCBPP = 6;
MaxDSCBPP = 1.5 * DSCInputBitPerComponent - 1 / 16;
} else if (Format == dm_444) {
- NonDSCBPP0 = 18;
- NonDSCBPP1 = 24;
- NonDSCBPP2 = 30;
- NonDSCBPP3 = 36;
+ NonDSCBPP0 = 24;
+ NonDSCBPP1 = 30;
+ NonDSCBPP2 = 36;
MinDSCBPP = 8;
MaxDSCBPP = 3 * DSCInputBitPerComponent - 1.0 / 16;
} else {
@@ -1661,9 +1659,7 @@ double dml32_TruncToValidBPP(
else
return dml_floor(16.0 * MaxLinkBPP, 1.0) / 16.0;
} else {
- if (MaxLinkBPP >= NonDSCBPP3)
- return NonDSCBPP3;
- else if (MaxLinkBPP >= NonDSCBPP2)
+ if (MaxLinkBPP >= NonDSCBPP2)
return NonDSCBPP2;
else if (MaxLinkBPP >= NonDSCBPP1)
return NonDSCBPP1;
@@ -1674,7 +1670,7 @@ double dml32_TruncToValidBPP(
}
} else {
if (!((DSCEnable == false && (DesiredBPP == NonDSCBPP2 || DesiredBPP == NonDSCBPP1 ||
- DesiredBPP == NonDSCBPP0 || DesiredBPP == NonDSCBPP3)) ||
+ DesiredBPP <= NonDSCBPP0)) ||
(DSCEnable && DesiredBPP >= MinDSCBPP && DesiredBPP <= MaxDSCBPP)))
return BPP_INVALID;
else
@@ -4342,7 +4338,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
+ v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (v->USRRetrainingRequiredFinal)
- v->Watermark.WritebackUrgentWatermark = v->Watermark.WritebackUrgentWatermark
+ v->Watermark.WritebackDRAMClockChangeWatermark = v->Watermark.WritebackDRAMClockChangeWatermark
+ mmSOCParameters.USRRetrainingLatency;
if (TotalActiveWriteback <= 1) {
@@ -4660,6 +4656,10 @@ void dml32_CalculateMinAndMaxPrefetchMode(
} else if (AllowForPStateChangeOrStutterInVBlankFinal == dm_prefetch_support_uclk_fclk_and_stutter) {
*MinPrefetchMode = 0;
*MaxPrefetchMode = 0;
+ } else if (AllowForPStateChangeOrStutterInVBlankFinal ==
+ dm_prefetch_support_uclk_fclk_and_stutter_if_possible) {
+ *MinPrefetchMode = 0;
+ *MaxPrefetchMode = 3;
} else {
*MinPrefetchMode = 0;
*MaxPrefetchMode = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
index 395ae8761980..9ba6cb67655f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
@@ -116,7 +116,7 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
else
rq_regs->rq_regs_l.min_meta_chunk_size = dml_log2(min_meta_chunk_bytes) - 6 + 1;
- if (min_meta_chunk_bytes == 0)
+ if (p1_min_meta_chunk_bytes == 0)
rq_regs->rq_regs_c.min_meta_chunk_size = 0;
else
rq_regs->rq_regs_c.min_meta_chunk_size = dml_log2(p1_min_meta_chunk_bytes) - 6 + 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 3c077164f362..ff0246a9458f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -619,8 +619,7 @@ struct _vcs_dpi_display_dlg_regs_st {
unsigned int refcyc_h_blank_end;
unsigned int dlg_vblank_end;
unsigned int min_dst_y_next_start;
- unsigned int optimized_min_dst_y_next_start;
- unsigned int optimized_min_dst_y_next_start_us;
+ unsigned int min_dst_y_next_start_us;
unsigned int refcyc_per_htotal;
unsigned int refcyc_x_after_scaler;
unsigned int dst_y_after_scaler;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index f9653f511baa..2f63ae954826 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -571,6 +571,10 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.OutputLinkDPRate[mode_lib->vba.NumberOfActivePlanes] = dout->dp_rate;
mode_lib->vba.ODMUse[mode_lib->vba.NumberOfActivePlanes] = dst->odm_combine_policy;
mode_lib->vba.DETSizeOverride[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override;
+ if (src->det_size_override)
+ mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src->det_size_override;
+ else
+ mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = ip->det_buffer_size_kbytes;
//TODO: Need to assign correct values to dp_multistream vars
mode_lib->vba.OutputMultistreamEn[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_en;
mode_lib->vba.OutputMultistreamId[mode_lib->vba.NumberOfActiveSurfaces] = dout->dp_multistream_id;
@@ -785,6 +789,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.pipe_plane[k] =
mode_lib->vba.NumberOfActivePlanes;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++;
+ if (src_k->det_size_override)
+ mode_lib->vba.DETBufferSizeInKByte[mode_lib->vba.NumberOfActivePlanes] = src_k->det_size_override;
if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes]
== dm_horz) {
mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 2bdc47615543..b9a05bb025db 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -700,7 +700,7 @@ static int inc_num_slices(union dsc_enc_slice_caps slice_caps, int num_slices)
}
}
- if (new_num_slices == num_slices) // No biger number of slices found
+ if (new_num_slices == num_slices) // No bigger number of slices found
new_num_slices++;
return new_num_slices;
@@ -952,6 +952,13 @@ static bool setup_dsc_config(
else
is_dsc_possible = false;
}
+ // When we force 2:1 ODM, we can't divide 1 slice amongst 2 separate DSC instances,
+ // so we need to enforce a minimum of 2 horizontal slices
+ if (options->dsc_force_odm_hslice_override) {
+ num_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, 2);
+ if (num_slices_h == 0)
+ is_dsc_possible = false;
+ }
if (!is_dsc_possible)
goto done;
@@ -1163,6 +1170,7 @@ void dc_dsc_policy_set_disable_dsc_stream_overhead(bool disable)
void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options)
{
options->dsc_min_slice_height_override = dc->debug.dsc_min_slice_height_override;
+ options->dsc_force_odm_hslice_override = dc->debug.force_odm_combine;
options->max_target_bpp_limit_override_x16 = 0;
options->slice_height_granularity = 1;
}
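
The hunk above leans on fit_num_slices_up() to round the slice count up to a value the encoder actually supports. A hedged sketch of what such a helper does; the valid_counts bitmask is a hypothetical stand-in for union dsc_enc_slice_caps:

        /* Return the smallest supported slice count >= min_slices, or 0 if
         * none that large is supported. Bit N set in valid_counts means a
         * count of N is supported (hypothetical encoding). */
        static int fit_num_slices_up_sketch(unsigned int valid_counts, int min_slices)
        {
                int n;

                for (n = min_slices; n < 32; n++)
                        if (valid_counts & (1u << n))
                                return n;
                return 0;
        }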
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index ecb4191b6e64..db5cf9acafe6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -55,6 +55,12 @@ struct abm_funcs {
unsigned int bytes,
unsigned int inst);
bool (*set_abm_pause)(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int otg_inst);
+ bool (*set_pipe_ex)(struct abm *abm,
+ unsigned int otg_inst,
+ unsigned int option,
+ unsigned int panel_inst);
+ bool (*set_abm_event)(struct abm *abm, unsigned int full_screen, unsigned int video_mode,
+ unsigned int hdr_mode, unsigned int panel_inst);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index beb26dc8a07f..aa80b3f2ca3f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -111,6 +111,7 @@ struct dcn_hubbub_state {
uint32_t vm_error_vmid;
uint32_t vm_error_pipe;
uint32_t vm_error_mode;
+ uint32_t test_debug_data;
};
struct hubbub_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index a131e30fd7d6..17904de4f155 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -593,6 +593,10 @@ static bool detect_dp(struct dc_link *link,
/* DP SST branch */
link->type = dc_connection_sst_branch;
} else {
+ if (link->dc->debug.disable_dp_plus_plus_wa &&
+ link->link_enc->features.flags.bits.IS_UHBR20_CAPABLE)
+ return false;
+
/* DP passive dongles */
sink_caps->signal = dp_passive_dongle_detection(link->ddc,
sink_caps,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index ba98013fecd0..f46864630506 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -1043,9 +1043,7 @@ static enum dc_status wake_up_aux_channel(struct dc_link *link)
DP_SET_POWER,
&dpcd_power_state,
sizeof(dpcd_power_state));
- if (status < 0)
- DC_LOG_DC("%s: Failed to power up sink: %s\n", __func__,
- dpcd_power_state == DP_SET_POWER_D0 ? "D0" : "D3");
+ DC_LOG_DC("%s: Failed to power up sink\n", __func__);
return DC_ERROR_UNEXPECTED;
}
@@ -1396,7 +1394,7 @@ static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data);
cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx(
link->dc, link->link_enc->transmitter);
- if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) &&
+ if (dm_execute_dmub_cmd(link->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
cmd.cable_id.header.ret_status == 1) {
cable_id->raw = cmd.cable_id.data.output_raw;
DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
@@ -1452,7 +1450,8 @@ bool read_is_mst_supported(struct dc_link *link)
*/
static bool dpcd_read_sink_ext_caps(struct dc_link *link)
{
- uint8_t dpcd_data;
+ uint8_t dpcd_data = 0;
+ uint8_t edp_general_cap2 = 0;
if (!link)
return false;
@@ -1461,6 +1460,12 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
return false;
link->dpcd_sink_ext_caps.raw = dpcd_data;
+
+ if (core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2, &edp_general_cap2, 1) != DC_OK)
+ return false;
+
+ link->dpcd_caps.panel_luminance_control = (edp_general_cap2 & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) != 0;
+
return true;
}
@@ -1554,6 +1559,9 @@ static bool retrieve_link_cap(struct dc_link *link)
int i;
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
const uint32_t post_oui_delay = 30; // 30ms
+ bool is_fec_supported = false;
+ bool is_dsc_basic_supported = false;
+ bool is_dsc_passthrough_supported = false;
memset(dpcd_data, '\0', sizeof(dpcd_data));
memset(&down_strm_port_count,
@@ -1696,6 +1704,7 @@ static bool retrieve_link_cap(struct dc_link *link)
/* TODO - decouple raw mst capability from policy decision */
link->dpcd_caps.is_mst_capable = read_is_mst_supported(link);
+ DC_LOG_DC("%s: MST_Support: %s\n", __func__, str_yes_no(link->dpcd_caps.is_mst_capable));
get_active_converter_info(ds_port.byte, link);
@@ -1803,6 +1812,17 @@ static bool retrieve_link_cap(struct dc_link *link)
DP_DSC_SUPPORT,
link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw));
+ if (status == DC_OK) {
+ is_fec_supported = link->dpcd_caps.fec_cap.bits.FEC_CAPABLE;
+ is_dsc_basic_supported = link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT;
+ is_dsc_passthrough_supported = link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT;
+ DC_LOG_DC("%s: FEC_Sink_Support: %s\n", __func__,
+ str_yes_no(is_fec_supported));
+ DC_LOG_DC("%s: DSC_Basic_Sink_Support: %s\n", __func__,
+ str_yes_no(is_dsc_basic_supported));
+ DC_LOG_DC("%s: DSC_Passthrough_Sink_Support: %s\n", __func__,
+ str_yes_no(is_dsc_passthrough_supported));
+ }
if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
status = core_link_read_dpcd(
link,
@@ -1931,6 +1951,9 @@ void detect_edp_sink_caps(struct dc_link *link)
link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 +
supported_link_rates[entry]) * 200;
+ DC_LOG_DC("%s: eDP v1.4 supported sink rates: [%d] %d kHz\n", __func__,
+ entry / 2, link_rate_in_khz);
+
if (link_rate_in_khz != 0) {
link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate;
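
The eDP v1.4 rate-table decode above packs each entry as a little-endian 16-bit count of 200 kHz units. A worked example, assuming the common 2.7 Gbps (HBR) entry:

        /* entry bytes {0xBC, 0x34} -> 0x34BC = 13500 units of 200 kHz */
        uint8_t supported_link_rates[2] = { 0xBC, 0x34 };
        uint32_t link_rate_in_khz = (supported_link_rates[1] * 0x100 +
                                     supported_link_rates[0]) * 200;
        /* 13500 * 200 = 2,700,000 kHz, i.e. 2.7 Gbps per lane (HBR) */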
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 4626fabc0a96..0bb749133909 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -90,7 +90,7 @@ bool dpia_query_hpd_status(struct dc_link *link)
cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
/* Return HPD status reported by DMUB if query successfully executed. */
- if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
+ if (dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
is_hpd_high = cmd.query_hpd.data.result;
DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n",
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
index 23d380f09a21..db87cfe37b5c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
@@ -211,11 +211,17 @@ enum link_training_result dp_perform_128b_132b_link_training(
dpcd_set_link_settings(link, lt_settings);
- if (result == LINK_TRAINING_SUCCESS)
+ if (result == LINK_TRAINING_SUCCESS) {
result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings);
+ if (result == LINK_TRAINING_SUCCESS)
+ DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__);
+ }
- if (result == LINK_TRAINING_SUCCESS)
+ if (result == LINK_TRAINING_SUCCESS) {
result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings);
+ if (result == LINK_TRAINING_SUCCESS)
+ DC_LOG_HW_LINK_TRAINING("%s: CDS done.\n", __func__);
+ }
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
index 3889ebb2256b..2b4c15b0b407 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
@@ -388,6 +388,8 @@ enum link_training_result dp_perform_8b_10b_link_training(
link_res,
lt_settings,
repeater_id);
+ if (status == LINK_TRAINING_SUCCESS)
+ DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__);
repeater_training_done(link, repeater_id);
@@ -409,6 +411,8 @@ enum link_training_result dp_perform_8b_10b_link_training(
link_res,
lt_settings,
DPRX);
+ if (status == LINK_TRAINING_SUCCESS)
+ DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
index 5731c4b61f9f..15faaf645b14 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -233,7 +233,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68};
- uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
+ uint32_t pre_disable_intercept_delay_ms = 0;
uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
uint32_t vendor_lttpr_write_address = 0xF004F;
@@ -244,6 +244,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
uint8_t toggle_rate;
uint8_t rate;
+ if (link->local_sink)
+ pre_disable_intercept_delay_ms =
+ link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms;
+
/* Only 8b/10b is supported */
ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) ==
DP_8b_10b_ENCODING);
@@ -259,7 +263,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
/* Certain display and cable configuration require extra delay */
if (offset > 2)
- pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
+ pre_disable_intercept_delay_ms = pre_disable_intercept_delay_ms * 2;
}
/* Vendor specific: Reset lane settings */
@@ -380,7 +384,8 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
0);
/* Vendor specific: Disable intercept */
for (i = 0; i < max_vendor_dpcd_retries; i++) {
- msleep(pre_disable_intercept_delay_ms);
+ if (pre_disable_intercept_delay_ms != 0)
+ msleep(pre_disable_intercept_delay_ms);
dpcd_status = core_link_write_dpcd(
link,
vendor_lttpr_write_address,
@@ -591,10 +596,9 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
const uint8_t vendor_lttpr_write_data_adicora_eq1[4] = {0x1, 0x55, 0x63, 0x2E};
const uint8_t vendor_lttpr_write_data_adicora_eq2[4] = {0x1, 0x55, 0x63, 0x01};
const uint8_t vendor_lttpr_write_data_adicora_eq3[4] = {0x1, 0x55, 0x63, 0x68};
- uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
-
+ uint32_t pre_disable_intercept_delay_ms = 0;
uint32_t vendor_lttpr_write_address = 0xF004F;
enum link_training_result status = LINK_TRAINING_SUCCESS;
uint8_t lane = 0;
@@ -603,6 +607,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
uint8_t toggle_rate;
uint8_t rate;
+ if (link->local_sink)
+ pre_disable_intercept_delay_ms =
+ link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms;
+
/* Only 8b/10b is supported */
ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) ==
DP_8b_10b_ENCODING);
@@ -618,7 +626,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
/* Certain display and cable configuration require extra delay */
if (offset > 2)
- pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
+ pre_disable_intercept_delay_ms = pre_disable_intercept_delay_ms * 2;
}
/* Vendor specific: Reset lane settings */
@@ -739,7 +747,8 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
0);
/* Vendor specific: Disable intercept */
for (i = 0; i < max_vendor_dpcd_retries; i++) {
- msleep(pre_disable_intercept_delay_ms);
+ if (pre_disable_intercept_delay_ms != 0)
+ msleep(pre_disable_intercept_delay_ms);
dpcd_status = core_link_write_dpcd(
link,
vendor_lttpr_write_address,
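
Both training sequences in this file now derive the intercept-disable delay the same way; condensed, the flow the hunks above implement is:

        /* per-panel EDID quirk replaces the global debug-option delay */
        uint32_t delay_ms = 0;

        if (link->local_sink)
                delay_ms = link->local_sink->edid_caps.panel_patch.delay_disable_aux_intercept_ms;
        if (offset > 2)         /* certain display/cable configs need extra delay */
                delay_ms *= 2;
        if (delay_ms)
                msleep(delay_ms);       /* panels without the quirk skip the sleep entirely */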
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 8d1df863659c..2039a345f23a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -165,14 +165,35 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
- if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ if (!link->dpcd_caps.panel_luminance_control) {
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
(uint8_t *)(&dpcd_backlight_set),
sizeof(dpcd_backlight_set)) != DC_OK)
- return false;
+ return false;
- if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
&backlight_control, 1) != DC_OK)
- return false;
+ return false;
+ } else {
+ const uint8_t backlight_enable = DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE;
+ struct target_luminance_value *target_luminance = NULL;
+
+ // if the target luminance value doesn't fit in 24 bits, clip it to 24 bits
+ if (backlight_millinits > 0xFFFFFF)
+ backlight_millinits = 0xFFFFFF;
+
+ target_luminance = (struct target_luminance_value *)&backlight_millinits;
+
+ if (core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &backlight_enable,
+ sizeof(backlight_enable)) != DC_OK)
+ return false;
+
+ if (core_link_write_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE,
+ (uint8_t *)(target_luminance),
+ sizeof(struct target_luminance_value)) != DC_OK)
+ return false;
+ }
return true;
}
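
The new luminance-control branch above writes the target luminance as a 24-bit DPCD value. A worked sketch of the clamp, assuming millinits is the unit the field expects:

        uint32_t backlight_millinits = 0x1234567;       /* example input, wider than 24 bits */

        if (backlight_millinits > 0xFFFFFF)
                backlight_millinits = 0xFFFFFF;         /* DPCD field is only 24 bits wide */
        /* the low three bytes are then written to
         * DP_EDP_PANEL_TARGET_LUMINANCE_VALUE via struct target_luminance_value */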
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index ba1715e2d25a..7c9a2b34bd05 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -271,7 +271,7 @@ struct dmub_srv_hw_params {
*/
struct dmub_diagnostic_data {
uint32_t dmcub_version;
- uint32_t scratch[16];
+ uint32_t scratch[17];
uint32_t pc;
uint32_t undefined_address_fault_addr;
uint32_t inst_fetch_fault_addr;
@@ -282,6 +282,7 @@ struct dmub_diagnostic_data {
uint32_t inbox0_rptr;
uint32_t inbox0_wptr;
uint32_t inbox0_size;
+ uint32_t gpint_datain0;
uint8_t is_dmcub_enabled : 1;
uint8_t is_dmcub_soft_reset : 1;
uint8_t is_dmcub_secure_reset : 1;
@@ -340,6 +341,8 @@ struct dmub_srv_hw_funcs {
void (*setup_mailbox)(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
+ uint32_t (*get_inbox1_wptr)(struct dmub_srv *dmub);
+
uint32_t (*get_inbox1_rptr)(struct dmub_srv *dmub);
void (*set_inbox1_wptr)(struct dmub_srv *dmub, uint32_t wptr_offset);
@@ -366,7 +369,6 @@ struct dmub_srv_hw_funcs {
bool (*is_hw_init)(struct dmub_srv *dmub);
- bool (*is_phy_init)(struct dmub_srv *dmub);
void (*enable_dmub_boot_options)(struct dmub_srv *dmub,
const struct dmub_srv_hw_params *params);
@@ -602,6 +604,18 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
/**
+ * dmub_srv_sync_inbox1() - sync sw state with hw state
+ * @dmub: the dmub service
+ *
+ * Sync SW state with HW state when resuming from S0i3
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub);
+
+/**
* dmub_srv_cmd_queue() - queues a command to the DMUB
* @dmub: the dmub service
* @cmd: the command to queue
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 598fa1de54ce..af1f50742371 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -257,7 +257,9 @@ struct dmub_feature_caps {
*/
uint8_t psr;
uint8_t fw_assisted_mclk_switch;
- uint8_t reserved[6];
+ uint8_t reserved[4];
+ uint8_t subvp_psr_support;
+ uint8_t gecc_enable;
};
struct dmub_visual_confirm_color {
@@ -360,7 +362,7 @@ union dmub_fw_boot_status {
uint32_t optimized_init_done : 1; /**< 1 if optimized init done */
uint32_t restore_required : 1; /**< 1 if driver should call restore */
uint32_t defer_load : 1; /**< 1 if VBIOS data is deferred programmed */
- uint32_t reserved : 1;
+ uint32_t fams_enabled : 1; /**< 1 if FAMS is enabled */
uint32_t detection_required: 1; /**< if detection need to be triggered by driver */
uint32_t hw_power_init_done: 1; /**< 1 if hw power init is completed */
} bits; /**< status bits */
@@ -376,6 +378,7 @@ enum dmub_fw_boot_status_bit {
DMUB_FW_BOOT_STATUS_BIT_OPTIMIZED_INIT_DONE = (1 << 2), /**< 1 if init done */
DMUB_FW_BOOT_STATUS_BIT_RESTORE_REQUIRED = (1 << 3), /**< 1 if driver should call restore */
DMUB_FW_BOOT_STATUS_BIT_DEFERRED_LOADED = (1 << 4), /**< 1 if VBIOS data is deferred programmed */
+ DMUB_FW_BOOT_STATUS_BIT_FAMS_ENABLED = (1 << 5), /**< 1 if FAMS is enabled*/
DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED = (1 << 6), /**< 1 if detection need to be triggered by driver*/
DMUB_FW_BOOT_STATUS_BIT_HW_POWER_INIT_DONE = (1 << 7), /**< 1 if hw power init is completed */
};
@@ -395,6 +398,12 @@ enum dmub_lvtma_status_bit {
DMUB_LVTMA_STATUS_BIT_EDP_ON = (1 << 1),
};
+enum dmub_ips_disable_type {
+ DMUB_IPS_DISABLE_IPS1 = 1,
+ DMUB_IPS_DISABLE_IPS2 = 2,
+ DMUB_IPS_DISABLE_IPS2_Z10 = 3,
+};
+
/**
* union dmub_fw_boot_options - Boot option definitions for SCRATCH14
*/
@@ -419,7 +428,10 @@ union dmub_fw_boot_options {
uint32_t dpia_hpd_int_enable_supported: 1; /* 1 if dpia hpd int enable supported */
uint32_t usb4_dpia_bw_alloc_supported: 1; /* 1 if USB4 dpia BW allocation supported */
uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/
- uint32_t reserved : 14; /**< reserved */
+ uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */
+ uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating */
+ uint32_t ips_disable: 2; /* options to disable IPS support */
+ uint32_t reserved : 10; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -988,16 +1000,25 @@ struct dmub_rb_cmd_mall {
};
/**
- * enum dmub_cmd_cab_type - TODO:
+ * enum dmub_cmd_cab_type - CAB command type.
*/
enum dmub_cmd_cab_type {
+ /**
+ * No idle optimizations (i.e. no CAB)
+ */
DMUB_CMD__CAB_NO_IDLE_OPTIMIZATION = 0,
+ /**
+ * No DCN requests for memory
+ */
DMUB_CMD__CAB_NO_DCN_REQ = 1,
+ /**
+ * Fit surfaces in CAB (i.e. CAB enable)
+ */
DMUB_CMD__CAB_DCN_SS_FIT_IN_CAB = 2,
};
/**
- * struct dmub_rb_cmd_cab_for_ss - TODO:
+ * struct dmub_rb_cmd_cab_for_ss - CAB command data.
*/
struct dmub_rb_cmd_cab_for_ss {
struct dmub_cmd_header header;
@@ -1005,6 +1026,9 @@ struct dmub_rb_cmd_cab_for_ss {
uint8_t debug_bits; /* debug bits */
};
+/**
+ * Enum indicating the MCLK switch mode used per pipe
+ */
enum mclk_switch_mode {
NONE = 0,
FPO = 1,
@@ -1125,8 +1149,6 @@ struct dmub_rb_cmd_idle_opt_dcn_restore {
*/
struct dmub_dcn_notify_idle_cntl_data {
uint8_t driver_idle;
- uint8_t d3_entry;
- uint8_t trigger;
uint8_t pad[1];
};
@@ -3550,6 +3572,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__DPIA_HPD_INT_ENABLE command.
*/
struct dmub_rb_cmd_dpia_hpd_int_enable dpia_hpd_int_enable;
+ /**
+ * Definition of a DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE command.
+ */
+ struct dmub_rb_cmd_idle_opt_dcn_notify_idle idle_opt_notify_idle;
};
/**
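
Putting the new boot-option bits together, a driver-side sketch; the field values follow the enum added above, while how the word reaches SCRATCH14 is firmware-interface detail outside this hunk:

        union dmub_fw_boot_options boot_options = { .all = 0 };

        boot_options.bits.disable_timeout_recovery = 1;
        boot_options.bits.ips_pg_disable = 1;                   /* keep ONO domains ungated */
        boot_options.bits.ips_disable = DMUB_IPS_DISABLE_IPS2;  /* fits the 2-bit field */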
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index a6540e27044d..98dad0d47e72 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -282,6 +282,11 @@ void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
}
+uint32_t dmub_dcn20_get_inbox1_wptr(struct dmub_srv *dmub)
+{
+ return REG_READ(DMCUB_INBOX1_WPTR);
+}
+
uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_INBOX1_RPTR);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index c2e5831ac52c..1df128e57ed3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -202,6 +202,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
+uint32_t dmub_dcn20_get_inbox1_wptr(struct dmub_srv *dmub);
+
uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub);
void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
index 51bb9bceb1b1..2d212bc974cc 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
@@ -54,9 +54,3 @@ const struct dmub_srv_common_regs dmub_srv_dcn21_regs = {
#undef DMUB_SF
};
-/* Shared functions. */
-
-bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub)
-{
- return REG_READ(DMCUB_SCRATCH10) == 0;
-}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
index 6fd5b0cd4ef3..8c4033ae4007 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
@@ -32,8 +32,4 @@
extern const struct dmub_srv_common_regs dmub_srv_dcn21_regs;
-/* Hardware functions. */
-
-bool dmub_dcn21_is_phy_init(struct dmub_srv *dmub);
-
#endif /* _DMUB_DCN21_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index c90b9ee42e12..ebf7aeec4029 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -242,6 +242,11 @@ void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub,
REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
}
+uint32_t dmub_dcn31_get_inbox1_wptr(struct dmub_srv *dmub)
+{
+ return REG_READ(DMCUB_INBOX1_WPTR);
+}
+
uint32_t dmub_dcn31_get_inbox1_rptr(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_INBOX1_RPTR);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
index f6db6f89d45d..7d5c10ee539b 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
@@ -204,6 +204,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
+uint32_t dmub_dcn31_get_inbox1_wptr(struct dmub_srv *dmub);
+
uint32_t dmub_dcn31_get_inbox1_rptr(struct dmub_srv *dmub);
void dmub_dcn31_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index 9c20516be066..bf5994e292d9 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -116,10 +116,6 @@ void dmub_dcn32_reset(struct dmub_srv *dmub)
break;
}
- /* Clear the GPINT command manually so we don't reset again. */
- cmd.all = 0;
- dmub->hw_funcs.set_gpint(dmub, cmd);
-
/* Force reset in case we timed out, DMCUB is likely hung. */
}
@@ -133,6 +129,10 @@ void dmub_dcn32_reset(struct dmub_srv *dmub)
REG_WRITE(DMCUB_OUTBOX0_RPTR, 0);
REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
REG_WRITE(DMCUB_SCRATCH0, 0);
+
+ /* Clear the GPINT command manually so we don't reset again. */
+ cmd.all = 0;
+ dmub->hw_funcs.set_gpint(dmub, cmd);
}
void dmub_dcn32_reset_release(struct dmub_srv *dmub)
@@ -266,6 +266,11 @@ void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
}
+uint32_t dmub_dcn32_get_inbox1_wptr(struct dmub_srv *dmub)
+{
+ return REG_READ(DMCUB_INBOX1_WPTR);
+}
+
uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub)
{
return REG_READ(DMCUB_INBOX1_RPTR);
@@ -434,6 +439,7 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
+ diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16);
diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
@@ -464,6 +470,8 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
diag_data->is_cw6_enabled = is_cw6_enabled;
+
+ diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
index 7d1a6eb4d665..d58a1e4b9f1c 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
@@ -107,6 +107,7 @@ struct dmub_srv;
DMUB_SR(DMCUB_SCRATCH15) \
DMUB_SR(DMCUB_SCRATCH16) \
DMUB_SR(DMCUB_SCRATCH17) \
+ DMUB_SR(DMCUB_GPINT_DATAIN0) \
DMUB_SR(DMCUB_GPINT_DATAIN1) \
DMUB_SR(DMCUB_GPINT_DATAOUT) \
DMUB_SR(CC_DC_PIPE_DIS) \
@@ -206,6 +207,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
+uint32_t dmub_dcn32_get_inbox1_wptr(struct dmub_srv *dmub);
+
uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub);
void dmub_dcn32_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 92c18bfb98b3..ea3bed70a229 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -166,6 +166,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->backdoor_load = dmub_dcn20_backdoor_load;
funcs->setup_windows = dmub_dcn20_setup_windows;
funcs->setup_mailbox = dmub_dcn20_setup_mailbox;
+ funcs->get_inbox1_wptr = dmub_dcn20_get_inbox1_wptr;
funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr;
funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
funcs->is_supported = dmub_dcn20_is_supported;
@@ -190,11 +191,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_diagnostic_data = dmub_dcn20_get_diagnostic_data;
- if (asic == DMUB_ASIC_DCN21) {
+ if (asic == DMUB_ASIC_DCN21)
dmub->regs = &dmub_srv_dcn21_regs;
- funcs->is_phy_init = dmub_dcn21_is_phy_init;
- }
if (asic == DMUB_ASIC_DCN30) {
dmub->regs = &dmub_srv_dcn30_regs;
@@ -237,6 +236,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->backdoor_load = dmub_dcn31_backdoor_load;
funcs->setup_windows = dmub_dcn31_setup_windows;
funcs->setup_mailbox = dmub_dcn31_setup_mailbox;
+ funcs->get_inbox1_wptr = dmub_dcn31_get_inbox1_wptr;
funcs->get_inbox1_rptr = dmub_dcn31_get_inbox1_rptr;
funcs->set_inbox1_wptr = dmub_dcn31_set_inbox1_wptr;
funcs->setup_out_mailbox = dmub_dcn31_setup_out_mailbox;
@@ -275,6 +275,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->backdoor_load_zfb_mode = dmub_dcn32_backdoor_load_zfb_mode;
funcs->setup_windows = dmub_dcn32_setup_windows;
funcs->setup_mailbox = dmub_dcn32_setup_mailbox;
+ funcs->get_inbox1_wptr = dmub_dcn32_get_inbox1_wptr;
funcs->get_inbox1_rptr = dmub_dcn32_get_inbox1_rptr;
funcs->set_inbox1_wptr = dmub_dcn32_set_inbox1_wptr;
funcs->setup_out_mailbox = dmub_dcn32_setup_out_mailbox;
@@ -644,6 +645,20 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
return DMUB_STATUS_OK;
}
+enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
+{
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
+ dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ dmub->inbox1_rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
+ dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
+ }
+
+ return DMUB_STATUS_OK;
+}
+
enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
{
if (!dmub->sw_init)
@@ -721,27 +736,6 @@ enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
return DMUB_STATUS_TIMEOUT;
}
-enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
- uint32_t timeout_us)
-{
- uint32_t i = 0;
-
- if (!dmub->hw_init)
- return DMUB_STATUS_INVALID;
-
- if (!dmub->hw_funcs.is_phy_init)
- return DMUB_STATUS_OK;
-
- for (i = 0; i <= timeout_us; i += 10) {
- if (dmub->hw_funcs.is_phy_init(dmub))
- return DMUB_STATUS_OK;
-
- udelay(10);
- }
-
- return DMUB_STATUS_TIMEOUT;
-}
-
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
uint32_t timeout_us)
{
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 5c41a4751db4..5798c0eafa1f 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -1137,10 +1137,6 @@ void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync,
if (in_out_vrr->supported &&
in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE) {
- unsigned int oldest_index = plane->time.index + 1;
-
- if (oldest_index >= DC_PLANE_UPDATE_TIMES_MAX)
- oldest_index = 0;
last_render_time_in_us = curr_time_stamp_in_us -
plane->time.prev_update_time_in_us;
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 51e76bce92ea..30349881a283 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -116,6 +116,27 @@ static const struct abm_parameters * const abm_settings[] = {
abm_settings_config2,
};
+static const struct dm_bl_data_point custom_backlight_curve0[] = {
+ {2, 14}, {4, 16}, {6, 18}, {8, 21}, {10, 23}, {12, 26}, {14, 29}, {16, 32}, {18, 35},
+ {20, 38}, {22, 41}, {24, 44}, {26, 48}, {28, 52}, {30, 55}, {32, 59}, {34, 62},
+ {36, 67}, {38, 71}, {40, 75}, {42, 80}, {44, 84}, {46, 88}, {48, 93}, {50, 98},
+ {52, 103}, {54, 108}, {56, 113}, {58, 118}, {60, 123}, {62, 129}, {64, 135}, {66, 140},
+ {68, 146}, {70, 152}, {72, 158}, {74, 164}, {76, 171}, {78, 177}, {80, 183}, {82, 190},
+ {84, 197}, {86, 204}, {88, 211}, {90, 218}, {92, 225}, {94, 232}, {96, 240}, {98, 247}};
+
+struct custom_backlight_profile {
+ uint8_t ac_level_percentage;
+ uint8_t dc_level_percentage;
+ uint8_t min_input_signal;
+ uint8_t max_input_signal;
+ uint8_t num_data_points;
+ const struct dm_bl_data_point *data_points;
+};
+
+static const struct custom_backlight_profile custom_backlight_profiles[] = {
+ {100, 32, 12, 255, ARRAY_SIZE(custom_backlight_curve0), custom_backlight_curve0},
+};
+
#define NUM_AMBI_LEVEL 5
#define NUM_AGGR_LEVEL 4
#define NUM_POWER_FN_SEGS 8
@@ -944,3 +965,25 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
return true;
}
+
+bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_backlight_caps *caps)
+{
+ unsigned int data_points_size;
+
+ if (config_no >= ARRAY_SIZE(custom_backlight_profiles))
+ return false;
+
+ data_points_size = custom_backlight_profiles[config_no].num_data_points
+ * sizeof(custom_backlight_profiles[config_no].data_points[0]);
+
+ caps->size = sizeof(struct dm_acpi_atif_backlight_caps) - sizeof(caps->data_points) + data_points_size;
+ caps->flags = 0;
+ caps->error_code = 0;
+ caps->ac_level_percentage = custom_backlight_profiles[config_no].ac_level_percentage;
+ caps->dc_level_percentage = custom_backlight_profiles[config_no].dc_level_percentage;
+ caps->min_input_signal = custom_backlight_profiles[config_no].min_input_signal;
+ caps->max_input_signal = custom_backlight_profiles[config_no].max_input_signal;
+ caps->num_data_points = custom_backlight_profiles[config_no].num_data_points;
+ memcpy(caps->data_points, custom_backlight_profiles[config_no].data_points, data_points_size);
+ return true;
+}
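
For the curve above (49 data points), the size computation in fill_custom_backlight_caps() reduces to the expression below, assuming the ACPI caps struct declares data_points as a fixed maximum-size array whose unused tail should not be counted:

        /* size = header bytes + bytes of the points actually used; the fixed
         * array's full size is subtracted before adding back the used part */
        caps->size = sizeof(struct dm_acpi_atif_backlight_caps)
                   - sizeof(caps->data_points)
                   + ARRAY_SIZE(custom_backlight_curve0) * sizeof(struct dm_bl_data_point);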
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index 1d3079e56799..ffc924c9991b 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -62,4 +62,7 @@ bool mod_power_only_edp(const struct dc_state *context,
bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
struct dc_stream_state *stream,
struct psr_config *config);
+
+bool fill_custom_backlight_caps(unsigned int config_no,
+ struct dm_acpi_atif_backlight_caps *caps);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index e4a22c68517d..f175e65b853a 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -240,7 +240,6 @@ enum DC_FEATURE_MASK {
DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
- DC_ENABLE_SUBVP_DRR = (1 << 9), // 0x200, disabled by default
};
enum DC_DEBUG_MASK {
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index f5e08b60f66e..36c831b280ed 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -508,19 +508,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
pi->caps_db_ramping ||
pi->caps_td_ramping ||
pi->caps_tcp_ramping) {
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
if (enable) {
ret = kv_program_pt_config_registers(adev, didt_config_kv);
if (ret) {
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return ret;
}
}
kv_do_enable_didt(adev, enable);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
return 0;
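
The power-management call sites in the remainder of this series all follow the same updated convention, sketched here; xcc_id 0 selects the first (or only) accelerator complex:

        amdgpu_gfx_rlc_enter_safe_mode(adev, 0);        /* xcc_id is now explicit */
        /* ... program DIDT/EDC registers under RLC safe mode ... */
        amdgpu_gfx_rlc_exit_safe_mode(adev, 0);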
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c
index 32a5a00fd8ae..21be23ec3c79 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c
@@ -973,7 +973,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
PP_CAP(PHM_PlatformCaps_TDRamping) ||
PP_CAP(PHM_PlatformCaps_TCPRamping)) {
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
mutex_lock(&adev->grbm_idx_mutex);
value = 0;
value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
@@ -1048,13 +1048,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
}
mutex_unlock(&adev->grbm_idx_mutex);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
return 0;
error:
mutex_unlock(&adev->grbm_idx_mutex);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return result;
}
@@ -1068,7 +1068,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
PP_CAP(PHM_PlatformCaps_TDRamping) ||
PP_CAP(PHM_PlatformCaps_TCPRamping)) {
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
result = smu7_enable_didt(hwmgr, false);
PP_ASSERT_WITH_CODE((result == 0),
@@ -1081,12 +1081,12 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == result),
"Failed to disable DPM DIDT.", goto error);
}
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
return 0;
error:
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return result;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
index 9757d47dd6b8..309a9d3bc1b7 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c
@@ -915,7 +915,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
num_se = adev->gfx.config.max_shader_engines;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
@@ -940,7 +940,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, true);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -949,11 +949,11 @@ static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
vega10_didt_set_mask(hwmgr, false);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -966,7 +966,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
num_se = adev->gfx.config.max_shader_engines;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
@@ -985,7 +985,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, true);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
if (PP_CAP(PHM_PlatformCaps_GCEDC))
@@ -1002,11 +1002,11 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
uint32_t data;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
vega10_didt_set_mask(hwmgr, false);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
data = 0x00000000;
@@ -1027,7 +1027,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
num_se = adev->gfx.config.max_shader_engines;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
@@ -1048,7 +1048,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, true);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -1057,11 +1057,11 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
vega10_didt_set_mask(hwmgr, false);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
@@ -1075,7 +1075,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
num_se = adev->gfx.config.max_shader_engines;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
@@ -1096,7 +1096,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, true);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
@@ -1116,11 +1116,11 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
uint32_t data;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
vega10_didt_set_mask(hwmgr, false);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
data = 0x00000000;
@@ -1138,7 +1138,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
int result;
- amdgpu_gfx_rlc_enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
mutex_lock(&adev->grbm_idx_mutex);
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
@@ -1151,7 +1151,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, false);
- amdgpu_gfx_rlc_exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 5ce433e2c16a..f1580a26a850 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -359,7 +359,7 @@ struct pp_hwmgr_func {
int (*set_ppfeature_status)(struct pp_hwmgr *hwmgr, uint64_t ppfeature_masks);
int (*set_mp1_state)(struct pp_hwmgr *hwmgr, enum pp_mp1_state mp1_state);
int (*asic_reset)(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode);
- int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool aquire);
+ int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool acquire);
int (*set_df_cstate)(struct pp_hwmgr *hwmgr, enum pp_df_cstate state);
int (*set_xgmi_pstate)(struct pp_hwmgr *hwmgr, uint32_t pstate);
int (*disable_power_features_for_compute_performance)(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 794ffd4a29c5..f32ce29edba7 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -425,11 +425,12 @@ struct ast_device *ast_device_create(const struct drm_driver *drv,
return ERR_PTR(-EIO);
/*
- * If we don't have IO space at all, use MMIO now and
- * assume the chip has MMIO enabled by default (rev 0x20
- * and higher).
+ * On AST2500 and later, MMIO is enabled by default, and it should be
+ * used for compatibility with Arm.
*/
- if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
+ if (pdev->revision >= 0x40) {
+ ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
+ } else if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) {
drm_info(dev, "platform has no IO space, trying MMIO\n");
ast->ioregs = ast->regs + AST_IO_MM_OFFSET;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 64458982be40..6bb1b8b27d7a 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -641,19 +641,27 @@ static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off, size_t len,
struct drm_rect *clip)
{
+ u32 line_length = info->fix.line_length;
+ u32 fb_height = info->var.yres;
off_t end = off + len;
u32 x1 = 0;
- u32 y1 = off / info->fix.line_length;
+ u32 y1 = off / line_length;
u32 x2 = info->var.xres;
- u32 y2 = DIV_ROUND_UP(end, info->fix.line_length);
+ u32 y2 = DIV_ROUND_UP(end, line_length);
+
+ /* Don't allow either bound to extend beyond the bottom of the display area */
+ if (y1 > fb_height)
+ y1 = fb_height;
+ if (y2 > fb_height)
+ y2 = fb_height;
if ((y2 - y1) == 1) {
/*
* We've only written to a single scanline. Try to reduce
* the number of horizontal pixels that need an update.
*/
- off_t bit_off = (off % info->fix.line_length) * 8;
- off_t bit_end = (end % info->fix.line_length) * 8;
+ off_t bit_off = (off % line_length) * 8;
+ off_t bit_end = (end % line_length) * 8;
x1 = bit_off / info->var.bits_per_pixel;
x2 = DIV_ROUND_UP(bit_end, info->var.bits_per_pixel);
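
A worked example of the clamping above: take a 1024x768 display with 32 bpp packed lines (line_length = 4096). A write landing entirely in fbdev padding below the visible area now yields an empty clip instead of one past the bottom edge:

        u32 line_length = 4096, fb_height = 768;
        off_t off = 4096 * 768, len = 64;               /* first byte below the display */
        u32 y1 = off / line_length;                     /* 768 */
        u32 y2 = DIV_ROUND_UP(off + len, line_length);  /* 769 before clamping */

        if (y1 > fb_height)
                y1 = fb_height;                         /* stays 768 */
        if (y2 > fb_height)
                y2 = fb_height;                         /* clamped to 768: empty clip */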
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 295382cd09b0..3fd6c733ff4e 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -221,7 +221,7 @@ mipi_dsi_device_register_full(struct mipi_dsi_host *host,
return dsi;
}
- dsi->dev.of_node = info->node;
+ device_set_node(&dsi->dev, of_fwnode_handle(info->node));
dsi->channel = info->channel;
strlcpy(dsi->name, info->type, sizeof(dsi->name));
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 97b0d4ae221a..9af76e376ca9 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -237,9 +237,11 @@ i915-y += \
display/intel_crtc_state_dump.o \
display/intel_cursor.o \
display/intel_display.o \
+ display/intel_display_driver.o \
display/intel_display_power.o \
display/intel_display_power_map.o \
display/intel_display_power_well.o \
+ display/intel_display_reset.o \
display/intel_display_rps.o \
display/intel_dmc.o \
display/intel_dpio_phy.o \
@@ -259,6 +261,7 @@ i915-y += \
display/intel_hdcp_gsc.o \
display/intel_hotplug.o \
display/intel_hti.o \
+ display/intel_load_detect.o \
display/intel_lpe_audio.o \
display/intel_modeset_verify.o \
display/intel_modeset_setup.o \
@@ -298,6 +301,7 @@ i915-y += \
display/icl_dsi.o \
display/intel_backlight.o \
display/intel_crt.o \
+ display/intel_cx0_phy.o \
display/intel_ddi.o \
display/intel_ddi_buf_trans.o \
display/intel_display_trace.o \
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 448ea26786e0..5c187e6e0472 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -7,6 +7,7 @@
#include "g4x_hdmi.h"
#include "i915_reg.h"
+#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_crtc.h"
@@ -80,15 +81,67 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
return ret;
}
+static bool connector_is_hdmi(struct drm_connector *connector)
+{
+ struct intel_encoder *encoder =
+ intel_attached_encoder(to_intel_connector(connector));
+
+ return encoder && encoder->type == INTEL_OUTPUT_HDMI;
+}
+
+static bool g4x_compute_has_hdmi_sink(struct intel_atomic_state *state,
+ struct intel_crtc *this_crtc)
+{
+ const struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ int i;
+
+ /*
+ * On g4x only one HDMI port can transmit infoframes/audio at
+ * any given time. Select the first suitable port for this duty.
+ *
+ * See also g4x_hdmi_connector_atomic_check().
+ */
+ for_each_new_connector_in_state(&state->base, connector, conn_state, i) {
+ struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder);
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (!connector_is_hdmi(connector))
+ continue;
+
+ crtc = to_intel_crtc(conn_state->crtc);
+ if (!crtc)
+ continue;
+
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (!intel_hdmi_compute_has_hdmi_sink(encoder, crtc_state, conn_state))
+ continue;
+
+ return crtc == this_crtc;
+ }
+
+ return false;
+}
+
static int g4x_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (HAS_PCH_SPLIT(i915))
crtc_state->has_pch_encoder = true;
+ if (IS_G4X(i915))
+ crtc_state->has_hdmi_sink = g4x_compute_has_hdmi_sink(state, crtc);
+ else
+ crtc_state->has_hdmi_sink =
+ intel_hdmi_compute_has_hdmi_sink(encoder, crtc_state, conn_state);
+
return intel_hdmi_compute_config(encoder, crtc_state, conn_state);
}
@@ -546,6 +599,66 @@ intel_hdmi_hotplug(struct intel_encoder *encoder,
return state;
}
+int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->dev);
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *conn;
+ int ret;
+
+ ret = intel_digital_connector_atomic_check(connector, state);
+ if (ret)
+ return ret;
+
+ if (!IS_G4X(i915))
+ return 0;
+
+ if (!intel_connector_needs_modeset(to_intel_atomic_state(state), connector))
+ return 0;
+
+ /*
+ * On g4x only one HDMI port can transmit infoframes/audio
+ * at any given time. Make sure all enabled HDMI ports are
+ * included in the state so that it's possible to select
+ * one of them for this duty.
+ *
+ * See also g4x_compute_has_hdmi_sink().
+ */
+ drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+ drm_for_each_connector_iter(conn, &conn_iter) {
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+
+ if (!connector_is_hdmi(conn))
+ continue;
+
+ drm_dbg_kms(&i915->drm, "Adding [CONNECTOR:%d:%s]\n",
+ conn->base.id, conn->name);
+
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
+ break;
+ }
+
+ crtc = conn_state->crtc;
+ if (!crtc)
+ continue;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ crtc_state->mode_changed = true;
+
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return ret;
+}
+
void g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.h b/drivers/gpu/drm/i915/display/g4x_hdmi.h
index db9a93bc9321..1e3ea7f3c846 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.h
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.h
@@ -11,9 +11,13 @@
#include "i915_reg_defs.h"
enum port;
+struct drm_atomic_state;
+struct drm_connector;
struct drm_i915_private;
void g4x_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port);
+int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state);
#endif
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index caef72d38798..af0c79a4c9a4 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -4,6 +4,7 @@
*/
#include "i915_drv.h"
+#include "i915_reg.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_display.h"
@@ -3447,9 +3448,10 @@ void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
drm_modeset_acquire_init(&ctx, 0);
-retry:
state->acquire_ctx = &ctx;
+ to_intel_atomic_state(state)->internal = true;
+retry:
/*
* Hardware readout is the only time we don't want to calculate
* intermediate watermarks (since we don't trust the current
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h
index a7875cbcd05a..b87ae369685a 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.h
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.h
@@ -12,7 +12,6 @@ struct drm_i915_private;
struct intel_crtc_state;
struct intel_plane_state;
-int ilk_wm_max_level(const struct drm_i915_private *i915);
bool ilk_disable_lp_wm(struct drm_i915_private *i915);
void ilk_wm_sanitize(struct drm_i915_private *i915);
bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable);
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index c9aeba0ecf91..c133928a0655 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1591,6 +1591,7 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
&pipe_config->hw.adjusted_mode;
int ret;
+ pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
ret = intel_panel_compute_config(intel_connector, adjusted_mode);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index a9a3f3715279..7cf51dd8c056 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -265,7 +265,6 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->update_wm_post = false;
crtc_state->fifo_changed = false;
crtc_state->preload_luts = false;
- crtc_state->inherited = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->do_async_flip = false;
crtc_state->fb_bits = 0;
@@ -311,262 +310,6 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
kfree(crtc_state);
}
-static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
- int num_scalers_need, struct intel_crtc *intel_crtc,
- const char *name, int idx,
- struct intel_plane_state *plane_state,
- int *scaler_id)
-{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- int j;
- u32 mode;
-
- if (*scaler_id < 0) {
- /* find a free scaler */
- for (j = 0; j < intel_crtc->num_scalers; j++) {
- if (scaler_state->scalers[j].in_use)
- continue;
-
- *scaler_id = j;
- scaler_state->scalers[*scaler_id].in_use = 1;
- break;
- }
- }
-
- if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
- "Cannot find scaler for %s:%d\n", name, idx))
- return -EINVAL;
-
- /* set scaler mode */
- if (plane_state && plane_state->hw.fb &&
- plane_state->hw.fb->format->is_yuv &&
- plane_state->hw.fb->format->num_planes > 1) {
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- if (DISPLAY_VER(dev_priv) == 9) {
- mode = SKL_PS_SCALER_MODE_NV12;
- } else if (icl_is_hdr_plane(dev_priv, plane->id)) {
- /*
- * On gen11+'s HDR planes we only use the scaler for
- * scaling. They have a dedicated chroma upsampler, so
- * we don't need the scaler to upsample the UV plane.
- */
- mode = PS_SCALER_MODE_NORMAL;
- } else {
- struct intel_plane *linked =
- plane_state->planar_linked_plane;
-
- mode = PS_SCALER_MODE_PLANAR;
-
- if (linked)
- mode |= PS_PLANE_Y_SEL(linked->id);
- }
- } else if (DISPLAY_VER(dev_priv) >= 10) {
- mode = PS_SCALER_MODE_NORMAL;
- } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
- /*
- * when only 1 scaler is in use on a pipe with 2 scalers
- * scaler 0 operates in high quality (HQ) mode.
- * In this case use scaler 0 to take advantage of HQ mode
- */
- scaler_state->scalers[*scaler_id].in_use = 0;
- *scaler_id = 0;
- scaler_state->scalers[0].in_use = 1;
- mode = SKL_PS_SCALER_MODE_HQ;
- } else {
- mode = SKL_PS_SCALER_MODE_DYN;
- }
-
- /*
- * FIXME: we should also check the scaler factors for pfit, so
- * this shouldn't be tied directly to planes.
- */
- if (plane_state && plane_state->hw.fb) {
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- const struct drm_rect *src = &plane_state->uapi.src;
- const struct drm_rect *dst = &plane_state->uapi.dst;
- int hscale, vscale, max_vscale, max_hscale;
-
- /*
- * FIXME: When two scalers are needed, but only one of
- * them needs to downscale, we should make sure that
- * the one that needs downscaling support is assigned
- * as the first scaler, so we don't reject downscaling
- * unnecessarily.
- */
-
- if (DISPLAY_VER(dev_priv) >= 14) {
- /*
- * On versions 14 and up, only the first
- * scaler supports a vertical scaling factor
- * of more than 1.0, while a horizontal
- * scaling factor of 3.0 is supported.
- */
- max_hscale = 0x30000 - 1;
- if (*scaler_id == 0)
- max_vscale = 0x30000 - 1;
- else
- max_vscale = 0x10000;
-
- } else if (DISPLAY_VER(dev_priv) >= 10 ||
- !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
- max_hscale = 0x30000 - 1;
- max_vscale = 0x30000 - 1;
- } else {
- max_hscale = 0x20000 - 1;
- max_vscale = 0x20000 - 1;
- }
-
- /*
- * FIXME: We should change the if-else block above to
- * support HQ vs dynamic scaler properly.
- */
-
- /* Check if required scaling is within limits */
- hscale = drm_rect_calc_hscale(src, dst, 1, max_hscale);
- vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale);
-
- if (hscale < 0 || vscale < 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Scaler %d doesn't support required plane scaling\n",
- *scaler_id);
- drm_rect_debug_print("src: ", src, true);
- drm_rect_debug_print("dst: ", dst, false);
-
- return -EINVAL;
- }
- }
-
- drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
- intel_crtc->pipe, *scaler_id, name, idx);
- scaler_state->scalers[*scaler_id].mode = mode;
-
- return 0;
-}
-
-/**
- * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
- * @dev_priv: i915 device
- * @intel_crtc: intel crtc
- * @crtc_state: incoming crtc_state to validate and setup scalers
- *
- * This function sets up scalers based on staged scaling requests for
- * a @crtc and its planes. It is called from crtc level check path. If request
- * is a supportable request, it attaches scalers to requested planes and crtc.
- *
- * This function takes into account the current scaler(s) in use by any planes
- * not being part of this atomic state
- *
- * Returns:
- * 0 - scalers were setup succesfully
- * error code - otherwise
- */
-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_plane *plane = NULL;
- struct intel_plane *intel_plane;
- struct intel_plane_state *plane_state = NULL;
- struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
- struct drm_atomic_state *drm_state = crtc_state->uapi.state;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
- int num_scalers_need;
- int i;
-
- num_scalers_need = hweight32(scaler_state->scaler_users);
-
- /*
- * High level flow:
- * - staged scaler requests are already in scaler_state->scaler_users
- * - check whether staged scaling requests can be supported
- * - add planes using scalers that aren't in current transaction
- * - assign scalers to requested users
- * - as part of plane commit, scalers will be committed
- * (i.e., either attached or detached) to respective planes in hw
- * - as part of crtc_commit, scaler will be either attached or detached
- * to crtc in hw
- */
-
- /* fail if required scalers > available scalers */
- if (num_scalers_need > intel_crtc->num_scalers){
- drm_dbg_kms(&dev_priv->drm,
- "Too many scaling requests %d > %d\n",
- num_scalers_need, intel_crtc->num_scalers);
- return -EINVAL;
- }
-
- /* walkthrough scaler_users bits and start assigning scalers */
- for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
- int *scaler_id;
- const char *name;
- int idx, ret;
-
- /* skip if scaler not required */
- if (!(scaler_state->scaler_users & (1 << i)))
- continue;
-
- if (i == SKL_CRTC_INDEX) {
- name = "CRTC";
- idx = intel_crtc->base.base.id;
-
- /* panel fitter case: assign as a crtc scaler */
- scaler_id = &scaler_state->scaler_id;
- } else {
- name = "PLANE";
-
- /* plane scaler case: assign as a plane scaler */
- /* find the plane that set the bit as scaler_user */
- plane = drm_state->planes[i].ptr;
-
- /*
- * to enable/disable hq mode, add planes that are using scaler
- * into this transaction
- */
- if (!plane) {
- struct drm_plane_state *state;
-
- /*
- * GLK+ scalers don't have a HQ mode so it
- * isn't necessary to change between HQ and dyn mode
- * on those platforms.
- */
- if (DISPLAY_VER(dev_priv) >= 10)
- continue;
-
- plane = drm_plane_from_index(&dev_priv->drm, i);
- state = drm_atomic_get_plane_state(drm_state, plane);
- if (IS_ERR(state)) {
- drm_dbg_kms(&dev_priv->drm,
- "Failed to add [PLANE:%d] to drm_state\n",
- plane->base.id);
- return PTR_ERR(state);
- }
- }
-
- intel_plane = to_intel_plane(plane);
- idx = plane->base.id;
-
- /* plane on different crtc cannot be a scaler user of this crtc */
- if (drm_WARN_ON(&dev_priv->drm,
- intel_plane->pipe != intel_crtc->pipe))
- continue;
-
- plane_state = intel_atomic_get_new_plane_state(intel_state,
- intel_plane);
- scaler_id = &plane_state->scaler_id;
- }
-
- ret = intel_atomic_setup_scaler(scaler_state, num_scalers_need,
- intel_crtc, name, idx,
- plane_state, scaler_id);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
struct drm_atomic_state *
intel_atomic_state_alloc(struct drm_device *dev)
{
@@ -599,6 +342,8 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
drm_atomic_state_default_clear(&state->base);
intel_atomic_clear_global_state(state);
+ /* state->internal not reset on purpose */
+
state->dpll_set = state->modeset = false;
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 1dc439983dd9..e506f6a87344 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -52,8 +52,4 @@ struct intel_crtc_state *
intel_atomic_get_crtc_state(struct drm_atomic_state *state,
struct intel_crtc *crtc);
-int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state);
-
#endif /* __INTEL_ATOMIC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 40de9f0f171b..4125ee07a271 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -36,6 +36,7 @@
#include <drm/drm_fourcc.h>
#include "i915_config.h"
+#include "i915_reg.h"
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
#include "intel_display_rps.h"
@@ -1028,7 +1029,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
int ret;
if (old_obj) {
- const struct intel_crtc_state *crtc_state =
+ const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state,
to_intel_crtc(old_plane_state->hw.crtc));
@@ -1043,7 +1044,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- if (intel_crtc_needs_modeset(crtc_state)) {
+ if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
ret = i915_sw_fence_await_reservation(&state->commit_ready,
old_obj->base.resv,
false, 0,
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 3d5a9bbc6fde..3d9c9b4f27f8 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -1039,6 +1039,7 @@ static void glk_force_audio_cdclk(struct drm_i915_private *i915,
return;
state->acquire_ctx = &ctx;
+ to_intel_atomic_state(state)->internal = true;
retry:
ret = glk_force_audio_cdclk_commit(to_intel_atomic_state(state), crtc,
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 202321ffbe2a..597d5816ad1b 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -150,6 +150,9 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
{
int ret;
+ if (DISPLAY_VER(dev_priv) >= 14)
+ return 0;
+
/* bspec says to keep retrying for at least 1 ms */
ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
points_mask,
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 084a483f9776..6bed75f1541a 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1896,7 +1896,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* mailbox communication, skip
* this step.
*/
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv))
/* NOOP */;
else if (DISPLAY_VER(dev_priv) >= 11)
ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
@@ -1932,10 +1932,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* NOOP - No Pcode communication needed for
* Display versions 14 and beyond
*/;
- else if (DISPLAY_VER(dev_priv) >= 11)
+ else if (DISPLAY_VER(dev_priv) >= 11 && !IS_DG2(dev_priv))
ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
cdclk_config->voltage_level);
- else
+ if (DISPLAY_VER(dev_priv) < 11) {
/*
* The timeout isn't specified, the 2ms used here is based on
* experiment.
@@ -1946,7 +1946,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
HSW_PCODE_DE_WRITE_FREQ_REQ,
cdclk_config->voltage_level,
150, 2);
-
+ }
if (ret) {
drm_err(&dev_priv->drm,
"PCode CDCLK freq set failed, (err %d, freq %d)\n",
@@ -2242,6 +2242,38 @@ void intel_cdclk_dump_config(struct drm_i915_private *i915,
cdclk_config->voltage_level);
}
+static void intel_pcode_notify(struct drm_i915_private *i915,
+ u8 voltage_level,
+ u8 active_pipe_count,
+ u16 cdclk,
+ bool cdclk_update_valid,
+ bool pipe_count_update_valid)
+{
+ int ret;
+ u32 update_mask = 0;
+
+ if (!IS_DG2(i915))
+ return;
+
+ update_mask = DISPLAY_TO_PCODE_UPDATE_MASK(cdclk, active_pipe_count, voltage_level);
+
+ if (cdclk_update_valid)
+ update_mask |= DISPLAY_TO_PCODE_CDCLK_VALID;
+
+ if (pipe_count_update_valid)
+ update_mask |= DISPLAY_TO_PCODE_PIPE_COUNT_VALID;
+
+ ret = skl_pcode_request(&i915->uncore, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE |
+ update_mask,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
+ if (ret)
+ drm_err(&i915->drm,
+ "Failed to inform PCU about display config (err %d)\n",
+ ret);
+}
+
/**
* intel_set_cdclk - Push the CDCLK configuration to the hardware
* @dev_priv: i915 device
@@ -2311,6 +2343,88 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
}
}
+static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_cdclk_state *old_cdclk_state =
+ intel_atomic_get_old_cdclk_state(state);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+ unsigned int cdclk = 0;
+ u8 voltage_level, num_active_pipes = 0;
+ bool change_cdclk, update_pipe_count;
+
+ if (!intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual) &&
+ new_cdclk_state->active_pipes ==
+ old_cdclk_state->active_pipes)
+ return;
+
+ /* According to "Sequence Before Frequency Change", voltage level set to 0x3 */
+ voltage_level = DISPLAY_TO_PCODE_VOLTAGE_MAX;
+
+ change_cdclk = new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk;
+ update_pipe_count = hweight8(new_cdclk_state->active_pipes) >
+ hweight8(old_cdclk_state->active_pipes);
+
+ /*
+ * According to "Sequence Before Frequency Change",
+ * if CDCLK is increasing, set bits 25:16 to upcoming CDCLK,
+ * if CDCLK is decreasing or not changing, set bits 25:16 to current CDCLK,
+ * which basically means we choose the maximum of old and new CDCLK, if we know both
+ */
+ if (change_cdclk)
+ cdclk = max(new_cdclk_state->actual.cdclk, old_cdclk_state->actual.cdclk);
+
+ /*
+ * According to "Sequence For Pipe Count Change",
+ * if pipe count is increasing, set bits 25:16 to upcoming pipe count
+ * (power well is enabled)
+ * no action if it is decreasing, before the change
+ */
+ if (update_pipe_count)
+ num_active_pipes = hweight8(new_cdclk_state->active_pipes);
+
+ intel_pcode_notify(i915, voltage_level, num_active_pipes, cdclk,
+ change_cdclk, update_pipe_count);
+}
+
+static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+ const struct intel_cdclk_state *old_cdclk_state =
+ intel_atomic_get_old_cdclk_state(state);
+ unsigned int cdclk = 0;
+ u8 voltage_level, num_active_pipes = 0;
+ bool update_cdclk, update_pipe_count;
+
+ /* According to "Sequence After Frequency Change", set voltage to used level */
+ voltage_level = new_cdclk_state->actual.voltage_level;
+
+ update_cdclk = new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk;
+ update_pipe_count = hweight8(new_cdclk_state->active_pipes) <
+ hweight8(old_cdclk_state->active_pipes);
+
+ /*
+ * According to "Sequence After Frequency Change",
+ * set bits 25:16 to current CDCLK
+ */
+ if (update_cdclk)
+ cdclk = new_cdclk_state->actual.cdclk;
+
+ /*
+ * According to "Sequence For Pipe Count Change",
+ * if pipe count is decreasing, set bits 25:16 to current pipe count,
+ * after the change (power well is disabled)
+ * no action if it is increasing, after the change
+ */
+ if (update_pipe_count)
+ num_active_pipes = hweight8(new_cdclk_state->active_pipes);
+
+ intel_pcode_notify(i915, voltage_level, num_active_pipes, cdclk,
+ update_cdclk, update_pipe_count);
+}
+
/**
* intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
* @state: intel atomic state
@@ -2321,7 +2435,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
void
intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_cdclk_state *old_cdclk_state =
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
@@ -2332,11 +2446,14 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
&new_cdclk_state->actual))
return;
+ if (IS_DG2(i915))
+ intel_cdclk_pcode_pre_notify(state);
+
if (pipe == INVALID_PIPE ||
old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
- drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
- intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe);
+ intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
}
}
@@ -2350,7 +2467,7 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
void
intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_cdclk_state *old_cdclk_state =
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
@@ -2361,11 +2478,14 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
&new_cdclk_state->actual))
return;
+ if (IS_DG2(i915))
+ intel_cdclk_pcode_post_notify(state);
+
if (pipe != INVALID_PIPE &&
old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
- drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
- intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe);
+ intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
}
}
@@ -2871,6 +2991,21 @@ int intel_cdclk_init(struct drm_i915_private *dev_priv)
return 0;
}
+static bool intel_cdclk_need_serialize(struct drm_i915_private *i915,
+ const struct intel_cdclk_state *old_cdclk_state,
+ const struct intel_cdclk_state *new_cdclk_state)
+{
+ bool power_well_cnt_changed = hweight8(old_cdclk_state->active_pipes) !=
+ hweight8(new_cdclk_state->active_pipes);
+ bool cdclk_changed = intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual);
+ /*
+ * We also need to poke the hw on DG2 when only the pipe power
+ * well count changes, because we notify PCode about it.
+ */
+ return cdclk_changed || (IS_DG2(i915) && power_well_cnt_changed);
+}
+
int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
@@ -2892,8 +3027,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
if (ret)
return ret;
- if (intel_cdclk_changed(&old_cdclk_state->actual,
- &new_cdclk_state->actual)) {
+ if (intel_cdclk_need_serialize(dev_priv, old_cdclk_state, new_cdclk_state)) {
/*
* Also serialize commits across all crtcs
* if the actual hw needs to be poked.
@@ -3235,6 +3369,27 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
return freq;
}
+static int i915_cdclk_info_show(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = m->private;
+
+ seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
+ seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
+ seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(i915_cdclk_info);
+
+void intel_cdclk_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_cdclk_info", 0444, minor->debugfs_root,
+ i915, &i915_cdclk_info_fops);
+}
+
static const struct intel_cdclk_funcs mtl_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
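
DEFINE_SHOW_ATTRIBUTE(i915_cdclk_info) is what supplies the i915_cdclk_info_fops passed to debugfs_create_file() above; the stock seq_file macro expands to roughly the following (a sketch, not patch code):

    static int i915_cdclk_info_open(struct inode *inode, struct file *file)
    {
        return single_open(file, i915_cdclk_info_show, inode->i_private);
    }

    static const struct file_operations i915_cdclk_info_fops = {
        .owner   = THIS_MODULE,
        .open    = i915_cdclk_info_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };
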
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 51e2f6a11ce4..48fd7d39e0cd 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -82,5 +82,6 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->display.cdclk.obj))
int intel_cdclk_init(struct drm_i915_private *dev_priv);
+void intel_cdclk_debugfs_register(struct drm_i915_private *i915);
#endif /* __INTEL_CDCLK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 36aac88143ac..07f1afe1d406 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -70,6 +70,11 @@ struct intel_color_funcs {
const struct drm_property_blob *blob1,
const struct drm_property_blob *blob2,
bool is_pre_csc_lut);
+ /*
+ * Read out the CSCs (if any) from the hardware into the
+ * software state. Used by eg. the hardware state checker.
+ */
+ void (*read_csc)(struct intel_crtc_state *crtc_state);
};
#define CTM_COEFF_SIGN (1ULL << 63)
@@ -116,46 +121,52 @@ struct intel_color_funcs {
#define ILK_CSC_COEFF_FP(coeff, fbits) \
(clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)
-#define ILK_CSC_COEFF_LIMITED_RANGE 0x0dc0
#define ILK_CSC_COEFF_1_0 0x7800
-
-#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255)
-
-/* Nop pre/post offsets */
-static const u16 ilk_csc_off_zero[3] = {};
-
-/* Identity matrix */
-static const u16 ilk_csc_coeff_identity[9] = {
- ILK_CSC_COEFF_1_0, 0, 0,
- 0, ILK_CSC_COEFF_1_0, 0,
- 0, 0, ILK_CSC_COEFF_1_0,
-};
-
-/* Limited range RGB post offsets */
-static const u16 ilk_csc_postoff_limited_range[3] = {
- ILK_CSC_POSTOFF_LIMITED_RANGE,
- ILK_CSC_POSTOFF_LIMITED_RANGE,
- ILK_CSC_POSTOFF_LIMITED_RANGE,
+#define ILK_CSC_COEFF_LIMITED_RANGE ((235 - 16) << (12 - 8)) /* exponent 0 */
+#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 << (12 - 8))
+
+static const struct intel_csc_matrix ilk_csc_matrix_identity = {
+ .preoff = {},
+ .coeff = {
+ ILK_CSC_COEFF_1_0, 0, 0,
+ 0, ILK_CSC_COEFF_1_0, 0,
+ 0, 0, ILK_CSC_COEFF_1_0,
+ },
+ .postoff = {},
};
/* Full range RGB -> limited range RGB matrix */
-static const u16 ilk_csc_coeff_limited_range[9] = {
- ILK_CSC_COEFF_LIMITED_RANGE, 0, 0,
- 0, ILK_CSC_COEFF_LIMITED_RANGE, 0,
- 0, 0, ILK_CSC_COEFF_LIMITED_RANGE,
+static const struct intel_csc_matrix ilk_csc_matrix_limited_range = {
+ .preoff = {},
+ .coeff = {
+ ILK_CSC_COEFF_LIMITED_RANGE, 0, 0,
+ 0, ILK_CSC_COEFF_LIMITED_RANGE, 0,
+ 0, 0, ILK_CSC_COEFF_LIMITED_RANGE,
+ },
+ .postoff = {
+ ILK_CSC_POSTOFF_LIMITED_RANGE,
+ ILK_CSC_POSTOFF_LIMITED_RANGE,
+ ILK_CSC_POSTOFF_LIMITED_RANGE,
+ },
};
/* BT.709 full range RGB -> limited range YCbCr matrix */
-static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = {
- 0x1e08, 0x9cc0, 0xb528,
- 0x2ba8, 0x09d8, 0x37e8,
- 0xbce8, 0x9ad8, 0x1e08,
+static const struct intel_csc_matrix ilk_csc_matrix_rgb_to_ycbcr = {
+ .preoff = {},
+ .coeff = {
+ 0x1e08, 0x9cc0, 0xb528,
+ 0x2ba8, 0x09d8, 0x37e8,
+ 0xbce8, 0x9ad8, 0x1e08,
+ },
+ .postoff = {
+ 0x0800, 0x0100, 0x0800,
+ },
};
-/* Limited range YCbCr post offsets */
-static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = {
- 0x0800, 0x0100, 0x0800,
-};
+static void intel_csc_clear(struct intel_csc_matrix *csc)
+{
+ memset(csc, 0, sizeof(*csc));
+}
static bool lut_is_legacy(const struct drm_property_blob *lut)
{
@@ -189,69 +200,182 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
}
static void ilk_update_pipe_csc(struct intel_crtc *crtc,
- const u16 preoff[3],
- const u16 coeff[9],
- const u16 postoff[3])
+ const struct intel_csc_matrix *csc)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- intel_de_write_fw(i915, PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
- intel_de_write_fw(i915, PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
- intel_de_write_fw(i915, PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
+ intel_de_write_fw(i915, PIPE_CSC_PREOFF_HI(pipe), csc->preoff[0]);
+ intel_de_write_fw(i915, PIPE_CSC_PREOFF_ME(pipe), csc->preoff[1]);
+ intel_de_write_fw(i915, PIPE_CSC_PREOFF_LO(pipe), csc->preoff[2]);
intel_de_write_fw(i915, PIPE_CSC_COEFF_RY_GY(pipe),
- coeff[0] << 16 | coeff[1]);
- intel_de_write_fw(i915, PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
+ csc->coeff[0] << 16 | csc->coeff[1]);
+ intel_de_write_fw(i915, PIPE_CSC_COEFF_BY(pipe),
+ csc->coeff[2] << 16);
intel_de_write_fw(i915, PIPE_CSC_COEFF_RU_GU(pipe),
- coeff[3] << 16 | coeff[4]);
- intel_de_write_fw(i915, PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
+ csc->coeff[3] << 16 | csc->coeff[4]);
+ intel_de_write_fw(i915, PIPE_CSC_COEFF_BU(pipe),
+ csc->coeff[5] << 16);
intel_de_write_fw(i915, PIPE_CSC_COEFF_RV_GV(pipe),
- coeff[6] << 16 | coeff[7]);
- intel_de_write_fw(i915, PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
+ csc->coeff[6] << 16 | csc->coeff[7]);
+ intel_de_write_fw(i915, PIPE_CSC_COEFF_BV(pipe),
+ csc->coeff[8] << 16);
- if (DISPLAY_VER(i915) >= 7) {
- intel_de_write_fw(i915, PIPE_CSC_POSTOFF_HI(pipe),
- postoff[0]);
- intel_de_write_fw(i915, PIPE_CSC_POSTOFF_ME(pipe),
- postoff[1]);
- intel_de_write_fw(i915, PIPE_CSC_POSTOFF_LO(pipe),
- postoff[2]);
- }
+ if (DISPLAY_VER(i915) < 7)
+ return;
+
+ intel_de_write_fw(i915, PIPE_CSC_POSTOFF_HI(pipe), csc->postoff[0]);
+ intel_de_write_fw(i915, PIPE_CSC_POSTOFF_ME(pipe), csc->postoff[1]);
+ intel_de_write_fw(i915, PIPE_CSC_POSTOFF_LO(pipe), csc->postoff[2]);
+}
+
+static void ilk_read_pipe_csc(struct intel_crtc *crtc,
+ struct intel_csc_matrix *csc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 tmp;
+
+ csc->preoff[0] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(pipe));
+ csc->preoff[1] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_ME(pipe));
+ csc->preoff[2] = intel_de_read_fw(i915, PIPE_CSC_PREOFF_LO(pipe));
+
+ tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RY_GY(pipe));
+ csc->coeff[0] = tmp >> 16;
+ csc->coeff[1] = tmp & 0xffff;
+ tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BY(pipe));
+ csc->coeff[2] = tmp >> 16;
+
+ tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RU_GU(pipe));
+ csc->coeff[3] = tmp >> 16;
+ csc->coeff[4] = tmp & 0xffff;
+ tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BU(pipe));
+ csc->coeff[5] = tmp >> 16;
+
+ tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_RV_GV(pipe));
+ csc->coeff[6] = tmp >> 16;
+ csc->coeff[7] = tmp & 0xffff;
+ tmp = intel_de_read_fw(i915, PIPE_CSC_COEFF_BV(pipe));
+ csc->coeff[8] = tmp >> 16;
+
+ if (DISPLAY_VER(i915) < 7)
+ return;
+
+ csc->postoff[0] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_HI(pipe));
+ csc->postoff[1] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_ME(pipe));
+ csc->postoff[2] = intel_de_read_fw(i915, PIPE_CSC_POSTOFF_LO(pipe));
+}
+
+static void ilk_read_csc(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc_state->csc_enable)
+ ilk_read_pipe_csc(crtc, &crtc_state->csc);
+}
+
+static void skl_read_csc(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ /*
+ * Display WA #1184: skl,glk
+ * Wa_1406463849: icl
+ *
+ * Danger! On SKL-ICL *reads* from the CSC coeff/offset registers
+ * will disarm an already armed CSC double buffer update.
+ * So this must not be called while armed. Fortunately the state checker
+ * readout happens only after the update has already been latched.
+ *
+ * On earlier and later platforms only writes to said registers will
+ * disarm the update. This is considered normal behavior and also
+ * happens with various other hardware units.
+ */
+ if (crtc_state->csc_enable)
+ ilk_read_pipe_csc(crtc, &crtc_state->csc);
}
static void icl_update_output_csc(struct intel_crtc *crtc,
- const u16 preoff[3],
- const u16 coeff[9],
- const u16 postoff[3])
+ const struct intel_csc_matrix *csc)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
+ intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), csc->preoff[0]);
+ intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), csc->preoff[1]);
+ intel_de_write_fw(i915, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), csc->preoff[2]);
intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe),
- coeff[0] << 16 | coeff[1]);
+ csc->coeff[0] << 16 | csc->coeff[1]);
intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BY(pipe),
- coeff[2] << 16);
+ csc->coeff[2] << 16);
intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe),
- coeff[3] << 16 | coeff[4]);
+ csc->coeff[3] << 16 | csc->coeff[4]);
intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BU(pipe),
- coeff[5] << 16);
+ csc->coeff[5] << 16);
intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe),
- coeff[6] << 16 | coeff[7]);
+ csc->coeff[6] << 16 | csc->coeff[7]);
intel_de_write_fw(i915, PIPE_CSC_OUTPUT_COEFF_BV(pipe),
- coeff[8] << 16);
+ csc->coeff[8] << 16);
+
+ intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), csc->postoff[0]);
+ intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), csc->postoff[1]);
+ intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), csc->postoff[2]);
+}
+
+static void icl_read_output_csc(struct intel_crtc *crtc,
+ struct intel_csc_matrix *csc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 tmp;
+
+ csc->preoff[0] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_HI(pipe));
+ csc->preoff[1] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_ME(pipe));
+ csc->preoff[2] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_PREOFF_LO(pipe));
+
+ tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe));
+ csc->coeff[0] = tmp >> 16;
+ csc->coeff[1] = tmp & 0xffff;
+ tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BY(pipe));
+ csc->coeff[2] = tmp >> 16;
+
+ tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe));
+ csc->coeff[3] = tmp >> 16;
+ csc->coeff[4] = tmp & 0xffff;
+ tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BU(pipe));
+ csc->coeff[5] = tmp >> 16;
+
+ tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe));
+ csc->coeff[6] = tmp >> 16;
+ csc->coeff[7] = tmp & 0xffff;
+ tmp = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_COEFF_BV(pipe));
+ csc->coeff[8] = tmp >> 16;
+
+ csc->postoff[0] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe));
+ csc->postoff[1] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe));
+ csc->postoff[2] = intel_de_read_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe));
+}
+
+static void icl_read_csc(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
- intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
+ /*
+ * Wa_1406463849: icl
+ *
+ * See skl_read_csc()
+ */
+ if (crtc_state->csc_mode & ICL_CSC_ENABLE)
+ ilk_read_pipe_csc(crtc, &crtc_state->csc);
+
+ if (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE)
+ icl_read_output_csc(crtc, &crtc_state->output_csc);
}
static bool ilk_limited_range(const struct intel_crtc_state *crtc_state)
@@ -294,14 +418,32 @@ static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
return !ilk_lut_limited_range(crtc_state);
}
+static void ilk_csc_copy(struct drm_i915_private *i915,
+ struct intel_csc_matrix *dst,
+ const struct intel_csc_matrix *src)
+{
+ *dst = *src;
+
+ if (DISPLAY_VER(i915) < 7)
+ memset(dst->postoff, 0, sizeof(dst->postoff));
+}
+
static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
- u16 coeffs[9], bool limited_color_range)
+ struct intel_csc_matrix *csc,
+ bool limited_color_range)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
const u64 *input;
u64 temp[9];
int i;
+ /* for preoff/postoff */
+ if (limited_color_range)
+ ilk_csc_copy(i915, csc, &ilk_csc_matrix_limited_range);
+ else
+ ilk_csc_copy(i915, csc, &ilk_csc_matrix_identity);
+
if (limited_color_range)
input = ctm_mult_by_limited(temp, ctm->matrix);
else
@@ -320,54 +462,49 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
*/
abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
- coeffs[i] = 0;
+ csc->coeff[i] = 0;
/* sign bit */
if (CTM_COEFF_NEGATIVE(input[i]))
- coeffs[i] |= 1 << 15;
+ csc->coeff[i] |= 1 << 15;
if (abs_coeff < CTM_COEFF_0_125)
- coeffs[i] |= (3 << 12) |
+ csc->coeff[i] |= (3 << 12) |
ILK_CSC_COEFF_FP(abs_coeff, 12);
else if (abs_coeff < CTM_COEFF_0_25)
- coeffs[i] |= (2 << 12) |
+ csc->coeff[i] |= (2 << 12) |
ILK_CSC_COEFF_FP(abs_coeff, 11);
else if (abs_coeff < CTM_COEFF_0_5)
- coeffs[i] |= (1 << 12) |
+ csc->coeff[i] |= (1 << 12) |
ILK_CSC_COEFF_FP(abs_coeff, 10);
else if (abs_coeff < CTM_COEFF_1_0)
- coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
+ csc->coeff[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
else if (abs_coeff < CTM_COEFF_2_0)
- coeffs[i] |= (7 << 12) |
+ csc->coeff[i] |= (7 << 12) |
ILK_CSC_COEFF_FP(abs_coeff, 8);
else
- coeffs[i] |= (6 << 12) |
+ csc->coeff[i] |= (6 << 12) |
ILK_CSC_COEFF_FP(abs_coeff, 7);
}
}
-static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+static void ilk_assign_csc(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
bool limited_color_range = ilk_csc_limited_range(crtc_state);
if (crtc_state->hw.ctm) {
- u16 coeff[9];
+ drm_WARN_ON(&i915->drm, !crtc_state->csc_enable);
- ilk_csc_convert_ctm(crtc_state, coeff, limited_color_range);
- ilk_update_pipe_csc(crtc, ilk_csc_off_zero, coeff,
- limited_color_range ?
- ilk_csc_postoff_limited_range :
- ilk_csc_off_zero);
+ ilk_csc_convert_ctm(crtc_state, &crtc_state->csc, limited_color_range);
} else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
- ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
- ilk_csc_coeff_rgb_to_ycbcr,
- ilk_csc_postoff_rgb_to_ycbcr);
+ drm_WARN_ON(&i915->drm, !crtc_state->csc_enable);
+
+ ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_rgb_to_ycbcr);
} else if (limited_color_range) {
- ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
- ilk_csc_coeff_limited_range,
- ilk_csc_postoff_limited_range);
+ drm_WARN_ON(&i915->drm, !crtc_state->csc_enable);
+
+ ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_limited_range);
} else if (crtc_state->csc_enable) {
/*
* On GLK both pipe CSC and degamma LUT are controlled
@@ -377,45 +514,67 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
*/
drm_WARN_ON(&i915->drm, !IS_GEMINILAKE(i915));
- ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
- ilk_csc_coeff_identity,
- ilk_csc_off_zero);
+ ilk_csc_copy(i915, &crtc_state->csc, &ilk_csc_matrix_identity);
+ } else {
+ intel_csc_clear(&crtc_state->csc);
}
}
-static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ if (crtc_state->csc_enable)
+ ilk_update_pipe_csc(crtc, &crtc_state->csc);
+}
+
+static void icl_assign_csc(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
if (crtc_state->hw.ctm) {
- u16 coeff[9];
+ drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) == 0);
+
+ ilk_csc_convert_ctm(crtc_state, &crtc_state->csc, false);
+ } else {
+ drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_CSC_ENABLE) != 0);
- ilk_csc_convert_ctm(crtc_state, coeff, false);
- ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
- coeff, ilk_csc_off_zero);
+ intel_csc_clear(&crtc_state->csc);
}
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
- icl_update_output_csc(crtc, ilk_csc_off_zero,
- ilk_csc_coeff_rgb_to_ycbcr,
- ilk_csc_postoff_rgb_to_ycbcr);
+ drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0);
+
+ ilk_csc_copy(i915, &crtc_state->output_csc, &ilk_csc_matrix_rgb_to_ycbcr);
} else if (crtc_state->limited_color_range) {
- icl_update_output_csc(crtc, ilk_csc_off_zero,
- ilk_csc_coeff_limited_range,
- ilk_csc_postoff_limited_range);
+ drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) == 0);
+
+ ilk_csc_copy(i915, &crtc_state->output_csc, &ilk_csc_matrix_limited_range);
+ } else {
+ drm_WARN_ON(&i915->drm, (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE) != 0);
+
+ intel_csc_clear(&crtc_state->output_csc);
}
}
-static void chv_load_cgm_csc(struct intel_crtc *crtc,
- const struct drm_property_blob *blob)
+static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- const struct drm_color_ctm *ctm = blob->data;
- enum pipe pipe = crtc->pipe;
- u16 coeffs[9];
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc_state->csc_mode & ICL_CSC_ENABLE)
+ ilk_update_pipe_csc(crtc, &crtc_state->csc);
+
+ if (crtc_state->csc_mode & ICL_OUTPUT_CSC_ENABLE)
+ icl_update_output_csc(crtc, &crtc_state->output_csc);
+}
+
+static void chv_cgm_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
+ struct intel_csc_matrix *csc)
+{
+ const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
int i;
- for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
+ for (i = 0; i < 9; i++) {
u64 abs_coeff = ((1ULL << 63) - 1) & ctm->matrix[i];
/* Round coefficient. */
@@ -423,26 +582,83 @@ static void chv_load_cgm_csc(struct intel_crtc *crtc,
/* Clamp to hardware limits. */
abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
- coeffs[i] = 0;
+ csc->coeff[i] = 0;
/* Write coefficients in S3.12 format. */
if (ctm->matrix[i] & (1ULL << 63))
- coeffs[i] |= 1 << 15;
+ csc->coeff[i] |= 1 << 15;
- coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
- coeffs[i] |= (abs_coeff >> 20) & 0xfff;
+ csc->coeff[i] |= ((abs_coeff >> 32) & 7) << 12;
+ csc->coeff[i] |= (abs_coeff >> 20) & 0xfff;
}
+}
+
+static void chv_load_cgm_csc(struct intel_crtc *crtc,
+ const struct intel_csc_matrix *csc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF01(pipe),
- coeffs[1] << 16 | coeffs[0]);
+ csc->coeff[1] << 16 | csc->coeff[0]);
intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF23(pipe),
- coeffs[3] << 16 | coeffs[2]);
+ csc->coeff[3] << 16 | csc->coeff[2]);
intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF45(pipe),
- coeffs[5] << 16 | coeffs[4]);
+ csc->coeff[5] << 16 | csc->coeff[4]);
intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF67(pipe),
- coeffs[7] << 16 | coeffs[6]);
+ csc->coeff[7] << 16 | csc->coeff[6]);
intel_de_write_fw(i915, CGM_PIPE_CSC_COEFF8(pipe),
- coeffs[8]);
+ csc->coeff[8]);
+}
+
+static void chv_read_cgm_csc(struct intel_crtc *crtc,
+ struct intel_csc_matrix *csc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 tmp;
+
+ tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF01(pipe));
+ csc->coeff[0] = tmp & 0xffff;
+ csc->coeff[1] = tmp >> 16;
+
+ tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF23(pipe));
+ csc->coeff[2] = tmp & 0xffff;
+ csc->coeff[3] = tmp >> 16;
+
+ tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF45(pipe));
+ csc->coeff[4] = tmp & 0xffff;
+ csc->coeff[5] = tmp >> 16;
+
+ tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF67(pipe));
+ csc->coeff[6] = tmp & 0xffff;
+ csc->coeff[7] = tmp >> 16;
+
+ tmp = intel_de_read_fw(i915, CGM_PIPE_CSC_COEFF8(pipe));
+ csc->coeff[8] = tmp & 0xffff;
+}
+
+static void chv_read_csc(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC)
+ chv_read_cgm_csc(crtc, &crtc_state->csc);
+}
+
+static void chv_assign_csc(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (crtc_state->hw.ctm) {
+ drm_WARN_ON(&i915->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) == 0);
+
+ chv_cgm_csc_convert_ctm(crtc_state, &crtc_state->csc);
+ } else {
+ drm_WARN_ON(&i915->drm, (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) != 0);
+
+ intel_csc_clear(&crtc_state->csc);
+ }
}
/* convert hw value with given bit_precision to lut property val */
@@ -1410,10 +1626,9 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
- const struct drm_property_blob *ctm = crtc_state->hw.ctm;
if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC)
- chv_load_cgm_csc(crtc, ctm);
+ chv_load_cgm_csc(crtc, &crtc_state->csc);
if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA)
chv_load_cgm_degamma(crtc, pre_csc_lut);
@@ -1522,6 +1737,9 @@ void intel_color_get_config(struct intel_crtc_state *crtc_state)
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
i915->display.funcs.color->read_luts(crtc_state);
+
+ if (i915->display.funcs.color->read_csc)
+ i915->display.funcs.color->read_csc(crtc_state);
}
bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
@@ -1840,6 +2058,8 @@ static int chv_color_check(struct intel_crtc_state *crtc_state)
intel_assign_luts(crtc_state);
+ chv_assign_csc(crtc_state);
+
crtc_state->preload_luts = chv_can_preload_luts(crtc_state);
return 0;
@@ -1962,6 +2182,8 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ ilk_assign_csc(crtc_state);
+
crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
return 0;
@@ -2068,6 +2290,8 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ ilk_assign_csc(crtc_state);
+
crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
return 0;
@@ -2199,6 +2423,8 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ ilk_assign_csc(crtc_state);
+
crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
return 0;
@@ -2261,6 +2487,8 @@ static int icl_color_check(struct intel_crtc_state *crtc_state)
intel_assign_luts(crtc_state);
+ icl_assign_csc(crtc_state);
+
crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
return 0;
@@ -3135,6 +3363,7 @@ static const struct intel_color_funcs chv_color_funcs = {
.load_luts = chv_load_luts,
.read_luts = chv_read_luts,
.lut_equal = chv_lut_equal,
+ .read_csc = chv_read_csc,
};
static const struct intel_color_funcs i965_color_funcs = {
@@ -3160,6 +3389,7 @@ static const struct intel_color_funcs tgl_color_funcs = {
.load_luts = icl_load_luts,
.read_luts = icl_read_luts,
.lut_equal = icl_lut_equal,
+ .read_csc = icl_read_csc,
};
static const struct intel_color_funcs icl_color_funcs = {
@@ -3170,6 +3400,7 @@ static const struct intel_color_funcs icl_color_funcs = {
.load_luts = icl_load_luts,
.read_luts = icl_read_luts,
.lut_equal = icl_lut_equal,
+ .read_csc = icl_read_csc,
};
static const struct intel_color_funcs glk_color_funcs = {
@@ -3179,6 +3410,7 @@ static const struct intel_color_funcs glk_color_funcs = {
.load_luts = glk_load_luts,
.read_luts = glk_read_luts,
.lut_equal = glk_lut_equal,
+ .read_csc = skl_read_csc,
};
static const struct intel_color_funcs skl_color_funcs = {
@@ -3188,6 +3420,7 @@ static const struct intel_color_funcs skl_color_funcs = {
.load_luts = bdw_load_luts,
.read_luts = bdw_read_luts,
.lut_equal = ivb_lut_equal,
+ .read_csc = skl_read_csc,
};
static const struct intel_color_funcs bdw_color_funcs = {
@@ -3197,6 +3430,7 @@ static const struct intel_color_funcs bdw_color_funcs = {
.load_luts = bdw_load_luts,
.read_luts = bdw_read_luts,
.lut_equal = ivb_lut_equal,
+ .read_csc = ilk_read_csc,
};
static const struct intel_color_funcs hsw_color_funcs = {
@@ -3206,6 +3440,7 @@ static const struct intel_color_funcs hsw_color_funcs = {
.load_luts = ivb_load_luts,
.read_luts = ivb_read_luts,
.lut_equal = ivb_lut_equal,
+ .read_csc = ilk_read_csc,
};
static const struct intel_color_funcs ivb_color_funcs = {
@@ -3215,6 +3450,7 @@ static const struct intel_color_funcs ivb_color_funcs = {
.load_luts = ivb_load_luts,
.read_luts = ivb_read_luts,
.lut_equal = ivb_lut_equal,
+ .read_csc = ilk_read_csc,
};
static const struct intel_color_funcs ilk_color_funcs = {
@@ -3224,6 +3460,7 @@ static const struct intel_color_funcs ilk_color_funcs = {
.load_luts = ilk_load_luts,
.read_luts = ilk_read_luts,
.lut_equal = ilk_lut_equal,
+ .read_csc = ilk_read_csc,
};
void intel_color_crtc_init(struct intel_crtc *crtc)
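
All the CSC helpers above now pass around struct intel_csc_matrix instead of separate preoff/coeff/postoff arrays. The struct itself is declared elsewhere in the series, but from the initializers and the [0..2]/[0..8] indexing its shape is presumably:

    struct intel_csc_matrix {
        u16 preoff[3];      /* pre-CSC offsets, HI/ME/LO register order */
        u16 coeff[9];       /* 3x3 coefficient matrix, row major */
        u16 postoff[3];     /* post-CSC offsets, HI/ME/LO register order */
    };
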
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 38e9c61c2344..f0f4897b3c3c 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -48,6 +48,7 @@
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug.h"
+#include "intel_load_detect.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
@@ -394,6 +395,7 @@ static int intel_crt_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
+ pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
return 0;
@@ -821,9 +823,9 @@ intel_crt_detect(struct drm_connector *connector,
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct intel_encoder *intel_encoder = &crt->base;
+ struct drm_atomic_state *state;
intel_wakeref_t wakeref;
- int status, ret;
- struct intel_load_detect_pipe tmp;
+ int status;
drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, connector->name,
@@ -881,8 +883,12 @@ load_detect:
}
/* for pre-945g platforms use load detect */
- ret = intel_get_load_detect_pipe(connector, &tmp, ctx);
- if (ret > 0) {
+ state = intel_load_detect_get_pipe(connector, ctx);
+ if (IS_ERR(state)) {
+ status = PTR_ERR(state);
+ } else if (!state) {
+ status = connector_status_unknown;
+ } else {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else if (DISPLAY_VER(dev_priv) < 4)
@@ -892,11 +898,7 @@ load_detect:
status = connector_status_disconnected;
else
status = connector_status_unknown;
- intel_release_load_detect_pipe(connector, &tmp, ctx);
- } else if (ret == 0) {
- status = connector_status_unknown;
- } else {
- status = ret;
+ intel_load_detect_release_pipe(connector, state, ctx);
}
out:
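
The CRT detect path now uses the intel_load_detect_get_pipe()/intel_load_detect_release_pipe() pair, whose contract (as exercised here) is: an ERR_PTR() on failure, NULL when no pipe could be borrowed for load detection, otherwise an atomic state that must be handed back after probing. A hedged usage sketch, with probe_connector() standing in for the DDC/hardware probe:

    struct drm_atomic_state *state;

    state = intel_load_detect_get_pipe(connector, ctx);
    if (IS_ERR(state)) {
        status = PTR_ERR(state);                /* locking/alloc failure */
    } else if (!state) {
        status = connector_status_unknown;      /* no pipe available */
    } else {
        status = probe_connector(connector);    /* hypothetical probe */
        intel_load_detect_release_pipe(connector, state, ctx);
    }
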
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index ed45a6934854..df7d05f1e14b 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -510,6 +510,13 @@ void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state)
VBLANK_EVASION_TIME_US);
max = vblank_start - 1;
+ /*
+ * M/N is double buffered on the transcoder's undelayed vblank,
+ * so with seamless M/N we must evade both vblanks.
+ */
+ if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
+ min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
+
if (min <= 0 || max <= 0)
goto irq_disable;
@@ -692,7 +699,8 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
* FIXME Should be synchronized with the start of vblank somehow...
*/
if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
- intel_crtc_update_active_timings(new_crtc_state);
+ intel_crtc_update_active_timings(new_crtc_state,
+ new_crtc_state->vrr.enable);
local_irq_enable();
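
Since M/N is latched at the undelayed vblank, the evasion window in intel_pipe_update_start() grows downward for seamless M/N fastsets. Reassembled from the hunk and its context (the min/max computation sits just above the visible context lines, so this is a sketch):

    min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
                                                  VBLANK_EVASION_TIME_US);
    max = vblank_start - 1;

    /* also cover the undelayed vblank, where M/N actually latches */
    if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
        min -= adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
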
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
index 73077137fb99..51a4c8df9e65 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.h
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -16,6 +16,16 @@ struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+/*
+ * FIXME: We should take the spinlocks only once for the entire update
+ * instead of once per mmio.
+ */
+#if IS_ENABLED(CONFIG_PROVE_LOCKING)
+#define VBLANK_EVASION_TIME_US 250
+#else
+#define VBLANK_EVASION_TIME_US 100
+#endif
+
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs);
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 54c8adc0702e..27d7bab46427 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -158,6 +158,45 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
DRM_RECT_ARG(&plane_state->uapi.dst));
}
+static void
+ilk_dump_csc(struct drm_i915_private *i915, const char *name,
+ const struct intel_csc_matrix *csc)
+{
+ int i;
+
+ drm_dbg_kms(&i915->drm,
+ "%s: pre offsets: 0x%04x 0x%04x 0x%04x\n", name,
+ csc->preoff[0], csc->preoff[1], csc->preoff[2]);
+
+ for (i = 0; i < 3; i++)
+ drm_dbg_kms(&i915->drm,
+ "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
+ csc->coeff[3 * i + 0],
+ csc->coeff[3 * i + 1],
+ csc->coeff[3 * i + 2]);
+
+ if (DISPLAY_VER(i915) < 7)
+ return;
+
+ drm_dbg_kms(&i915->drm,
+ "%s: post offsets: 0x%04x 0x%04x 0x%04x\n", name,
+ csc->postoff[0], csc->postoff[1], csc->postoff[2]);
+}
+
+static void
+chv_dump_csc(struct drm_i915_private *i915, const char *name,
+ const struct intel_csc_matrix *csc)
+{
+ int i;
+
+ for (i = 0; i < 3; i++)
+ drm_dbg_kms(&i915->drm,
+ "%s: coefficients: 0x%04x 0x%04x 0x%04x\n", name,
+ csc->coeff[3 * i + 0],
+ csc->coeff[3 * i + 1],
+ csc->coeff[3 * i + 2]);
+}
+
void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
struct intel_atomic_state *state,
const char *context)
@@ -178,10 +217,11 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
drm_dbg_kms(&i915->drm,
- "active: %s, output_types: %s (0x%x), output format: %s\n",
+ "active: %s, output_types: %s (0x%x), output format: %s, sink format: %s\n",
str_yes_no(pipe_config->hw.active),
buf, pipe_config->output_types,
- intel_output_format_name(pipe_config->output_format));
+ intel_output_format_name(pipe_config->output_format),
+ intel_output_format_name(pipe_config->sink_format));
drm_dbg_kms(&i915->drm,
"cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
@@ -325,6 +365,14 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
pipe_config->post_csc_lut ?
drm_color_lut_size(pipe_config->post_csc_lut) : 0);
+ if (DISPLAY_VER(i915) >= 11)
+ ilk_dump_csc(i915, "output csc", &pipe_config->output_csc);
+
+ if (!HAS_GMCH(i915))
+ ilk_dump_csc(i915, "pipe csc", &pipe_config->csc);
+ else if (IS_CHERRYVIEW(i915))
+ chv_dump_csc(i915, "cgm csc", &pipe_config->csc);
+
dump_planes:
if (!state)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
new file mode 100644
index 000000000000..d94127e7448b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -0,0 +1,2963 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_reg.h"
+#include "intel_cx0_phy.h"
+#include "intel_cx0_phy_regs.h"
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_hdmi.h"
+#include "intel_panel.h"
+#include "intel_psr.h"
+#include "intel_tc.h"
+
+#define MB_WRITE_COMMITTED true
+#define MB_WRITE_UNCOMMITTED false
+
+#define for_each_cx0_lane_in_mask(__lane_mask, __lane) \
+ for ((__lane) = 0; (__lane) < 2; (__lane)++) \
+ for_each_if((__lane_mask) & BIT(__lane))
+
+#define INTEL_CX0_LANE0 BIT(0)
+#define INTEL_CX0_LANE1 BIT(1)
+#define INTEL_CX0_BOTH_LANES (INTEL_CX0_LANE1 | INTEL_CX0_LANE0)
+
+bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy)
+{
+ if (IS_METEORLAKE(i915) && (phy < PHY_C))
+ return true;
+
+ return false;
+}
+
+static int lane_mask_to_lane(u8 lane_mask)
+{
+ if (WARN_ON((lane_mask & ~INTEL_CX0_BOTH_LANES) ||
+ hweight8(lane_mask) != 1))
+ return 0;
+
+ return ilog2(lane_mask);
+}
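
For reference, the lane helpers above resolve as lane_mask_to_lane(INTEL_CX0_LANE0) == 0 and lane_mask_to_lane(INTEL_CX0_LANE1) == 1 (an empty or multi-lane mask trips the WARN and falls back to lane 0), while the iterator walks whichever lanes the mask selects (pr_info() purely for illustration):

    int lane;

    for_each_cx0_lane_in_mask(INTEL_CX0_BOTH_LANES, lane)
        pr_info("lane %d\n", lane);     /* visits lane 0, then lane 1 */
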
+
+static void
+assert_dc_off(struct drm_i915_private *i915)
+{
+ bool enabled;
+
+ enabled = intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF);
+ drm_WARN_ON(&i915->drm, !enabled);
+}
+
+/*
+ * Prepare HW for CX0 phy transactions.
+ *
+ * It is required that PSR and DC5/6 are disabled before any CX0 message
+ * bus transaction is executed.
+ */
+static intel_wakeref_t intel_cx0_phy_transaction_begin(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_psr_pause(intel_dp);
+ return intel_display_power_get(i915, POWER_DOMAIN_DC_OFF);
+}
+
+static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_psr_resume(intel_dp);
+ intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, wakeref);
+}
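
Per the comment above, every burst of message-bus traffic is expected to be bracketed by this pair so PSR and DC5/6 cannot interfere; a minimal sketch:

    intel_wakeref_t wakeref;

    wakeref = intel_cx0_phy_transaction_begin(encoder);
    /* ... intel_cx0_read()/intel_cx0_write() traffic here ... */
    intel_cx0_phy_transaction_end(encoder, wakeref);
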
+
+static void intel_clear_response_ready_flag(struct drm_i915_private *i915,
+ enum port port, int lane)
+{
+ intel_de_rmw(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
+ 0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET);
+}
+
+static void intel_cx0_bus_reset(struct drm_i915_private *i915, enum port port, int lane)
+{
+ enum phy phy = intel_port_to_phy(i915, port);
+
+ intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_RESET);
+
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_RESET,
+ XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ drm_err_once(&i915->drm, "Failed to bring PHY %c to idle.\n", phy_name(phy));
+ return;
+ }
+
+ intel_clear_response_ready_flag(i915, port, lane);
+}
+
+static int intel_cx0_wait_for_ack(struct drm_i915_private *i915, enum port port,
+ int command, int lane, u32 *val)
+{
+ enum phy phy = intel_port_to_phy(i915, port);
+
+ if (__intel_de_wait_for_register(i915,
+ XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane),
+ XELPDP_PORT_P2M_RESPONSE_READY,
+ XELPDP_PORT_P2M_RESPONSE_READY,
+ XELPDP_MSGBUS_TIMEOUT_FAST_US,
+ XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
+ drm_dbg_kms(&i915->drm, "PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
+ phy_name(phy), *val);
+ return -ETIMEDOUT;
+ }
+
+ if (*val & XELPDP_PORT_P2M_ERROR_SET) {
+ drm_dbg_kms(&i915->drm, "PHY %c Error occurred during %s command. Status: 0x%x\n", phy_name(phy),
+ command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
+ intel_cx0_bus_reset(i915, port, lane);
+ return -EINVAL;
+ }
+
+ if (REG_FIELD_GET(XELPDP_PORT_P2M_COMMAND_TYPE_MASK, *val) != command) {
+ drm_dbg_kms(&i915->drm, "PHY %c Not a %s response. MSGBUS Status: 0x%x.\n", phy_name(phy),
+ command == XELPDP_PORT_P2M_COMMAND_READ_ACK ? "read" : "write", *val);
+ intel_cx0_bus_reset(i915, port, lane);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
+ int lane, u16 addr)
+{
+ enum phy phy = intel_port_to_phy(i915, port);
+ int ack;
+ u32 val;
+
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ drm_dbg_kms(&i915->drm,
+ "PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
+ intel_cx0_bus_reset(i915, port, lane);
+ return -ETIMEDOUT;
+ }
+
+ intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING |
+ XELPDP_PORT_M2P_COMMAND_READ |
+ XELPDP_PORT_M2P_ADDRESS(addr));
+
+ ack = intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_READ_ACK, lane, &val);
+ if (ack < 0) {
+ intel_cx0_bus_reset(i915, port, lane);
+ return ack;
+ }
+
+ intel_clear_response_ready_flag(i915, port, lane);
+
+ return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
+}
+
+static u8 __intel_cx0_read(struct drm_i915_private *i915, enum port port,
+ int lane, u16 addr)
+{
+ enum phy phy = intel_port_to_phy(i915, port);
+ int i, status;
+
+ assert_dc_off(i915);
+
+ /* 3 tries are assumed to be enough to read successfully */
+ for (i = 0; i < 3; i++) {
+ status = __intel_cx0_read_once(i915, port, lane, addr);
+
+ if (status >= 0)
+ return status;
+ }
+
+ drm_err_once(&i915->drm, "PHY %c Read %04x failed after %d retries.\n",
+ phy_name(phy), addr, i);
+
+ return 0;
+}
+
+static u8 intel_cx0_read(struct drm_i915_private *i915, enum port port,
+ u8 lane_mask, u16 addr)
+{
+ int lane = lane_mask_to_lane(lane_mask);
+
+ return __intel_cx0_read(i915, port, lane, addr);
+}
+
+static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
+ int lane, u16 addr, u8 data, bool committed)
+{
+ enum phy phy = intel_port_to_phy(i915, port);
+ u32 val;
+
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ drm_dbg_kms(&i915->drm,
+ "PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy));
+ intel_cx0_bus_reset(i915, port, lane);
+ return -ETIMEDOUT;
+ }
+
+ intel_de_write(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING |
+ (committed ? XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED :
+ XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED) |
+ XELPDP_PORT_M2P_DATA(data) |
+ XELPDP_PORT_M2P_ADDRESS(addr));
+
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ drm_dbg_kms(&i915->drm,
+ "PHY %c Timeout waiting for write to complete. Resetting the bus.\n", phy_name(phy));
+ intel_cx0_bus_reset(i915, port, lane);
+ return -ETIMEDOUT;
+ }
+
+ if (committed) {
+ if (intel_cx0_wait_for_ack(i915, port, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val) < 0) {
+ intel_cx0_bus_reset(i915, port, lane);
+ return -EINVAL;
+ }
+ } else if ((intel_de_read(i915, XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane)) &
+ XELPDP_PORT_P2M_ERROR_SET)) {
+ drm_dbg_kms(&i915->drm,
+ "PHY %c Error occurred during write command.\n", phy_name(phy));
+ intel_cx0_bus_reset(i915, port, lane);
+ return -EINVAL;
+ }
+
+ intel_clear_response_ready_flag(i915, port, lane);
+
+ return 0;
+}
+
+static void __intel_cx0_write(struct drm_i915_private *i915, enum port port,
+ int lane, u16 addr, u8 data, bool committed)
+{
+ enum phy phy = intel_port_to_phy(i915, port);
+ int i, status;
+
+ assert_dc_off(i915);
+
+ /* Three attempts are assumed to be enough for a successful write */
+ for (i = 0; i < 3; i++) {
+ status = __intel_cx0_write_once(i915, port, lane, addr, data, committed);
+
+ if (status == 0)
+ return;
+ }
+
+ drm_err_once(&i915->drm,
+ "PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
+}
+
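+/* Writes are replayed on every lane selected in @lane_mask. */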
+static void intel_cx0_write(struct drm_i915_private *i915, enum port port,
+ u8 lane_mask, u16 addr, u8 data, bool committed)
+{
+ int lane;
+
+ for_each_cx0_lane_in_mask(lane_mask, lane)
+ __intel_cx0_write(i915, port, lane, addr, data, committed);
+}
+
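+/*
+ * C20 SRAM registers are reached indirectly over the message bus: the 16-bit
+ * SRAM address is split across the H/L address registers, then the data
+ * registers are accessed byte by byte. The last message-bus write in each
+ * sequence is committed, which triggers the actual SRAM access.
+ */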
+static void intel_c20_sram_write(struct drm_i915_private *i915, enum port port,
+ int lane, u16 addr, u16 data)
+{
+ assert_dc_off(i915);
+
+ intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_H, addr >> 8, 0);
+ intel_cx0_write(i915, port, lane, PHY_C20_WR_ADDRESS_L, addr & 0xff, 0);
+
+ intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_H, data >> 8, 0);
+ intel_cx0_write(i915, port, lane, PHY_C20_WR_DATA_L, data & 0xff, 1);
+}
+
+static u16 intel_c20_sram_read(struct drm_i915_private *i915, enum port port,
+ int lane, u16 addr)
+{
+ u16 val;
+
+ assert_dc_off(i915);
+
+ intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_H, addr >> 8, 0);
+ intel_cx0_write(i915, port, lane, PHY_C20_RD_ADDRESS_L, addr & 0xff, 1);
+
+ val = intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_H);
+ val <<= 8;
+ val |= intel_cx0_read(i915, port, lane, PHY_C20_RD_DATA_L);
+
+ return val;
+}
+
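+/*
+ * Read-modify-write helper: the write is skipped entirely when the masked
+ * update leaves the register value unchanged, saving a message-bus
+ * transaction.
+ */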
+static void __intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
+ int lane, u16 addr, u8 clear, u8 set, bool committed)
+{
+ u8 old, val;
+
+ old = __intel_cx0_read(i915, port, lane, addr);
+ val = (old & ~clear) | set;
+
+ if (val != old)
+ __intel_cx0_write(i915, port, lane, addr, val, committed);
+}
+
+static void intel_cx0_rmw(struct drm_i915_private *i915, enum port port,
+ u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
+{
+ u8 lane;
+
+ for_each_cx0_lane_in_mask(lane_mask, lane)
+ __intel_cx0_rmw(i915, port, lane, addr, clear, set, committed);
+}
+
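+/*
+ * Tx voltage-boost and termination-control levels for C10: the higher
+ * settings are used for the DP HBR2/HBR3 rates (excluding eDP), with fixed
+ * defaults otherwise. The specific values are presumably taken from the
+ * platform tuning tables.
+ */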
+static u8 intel_c10_get_tx_vboost_lvl(const struct intel_crtc_state *crtc_state)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ (crtc_state->port_clock == 540000 ||
+ crtc_state->port_clock == 810000))
+ return 5;
+ else
+ return 4;
+ } else {
+ return 5;
+ }
+}
+
+static u8 intel_c10_get_tx_term_ctl(const struct intel_crtc_state *crtc_state)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
+ (crtc_state->port_clock == 540000 ||
+ crtc_state->port_clock == 810000))
+ return 5;
+ else
+ return 2;
+ } else {
+ return 6;
+ }
+}
+
+void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_ddi_buf_trans *trans;
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ intel_wakeref_t wakeref;
+ int n_entries, ln;
+
+ wakeref = intel_cx0_phy_transaction_begin(encoder);
+
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&i915->drm, !trans)) {
+ intel_cx0_phy_transaction_end(encoder, wakeref);
+ return;
+ }
+
+ if (intel_is_c10phy(i915, phy)) {
+ intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+ 0, C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED);
+ intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CMN(3),
+ C10_CMN3_TXVBOOST_MASK,
+ C10_CMN3_TXVBOOST(intel_c10_get_tx_vboost_lvl(crtc_state)),
+ MB_WRITE_UNCOMMITTED);
+ intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_TX(1),
+ C10_TX1_TERMCTL_MASK,
+ C10_TX1_TERMCTL(intel_c10_get_tx_term_ctl(crtc_state)),
+ MB_WRITE_COMMITTED);
+ }
+
+ for (ln = 0; ln < crtc_state->lane_count; ln++) {
+ int level = intel_ddi_level(encoder, crtc_state, ln);
+ int lane, tx;
+
+ lane = ln / 2;
+ tx = ln % 2;
+
+ intel_cx0_rmw(i915, encoder->port, BIT(lane), PHY_CX0_VDROVRD_CTL(lane, tx, 0),
+ C10_PHY_OVRD_LEVEL_MASK,
+ C10_PHY_OVRD_LEVEL(trans->entries[level].snps.pre_cursor),
+ MB_WRITE_COMMITTED);
+ intel_cx0_rmw(i915, encoder->port, BIT(lane), PHY_CX0_VDROVRD_CTL(lane, tx, 1),
+ C10_PHY_OVRD_LEVEL_MASK,
+ C10_PHY_OVRD_LEVEL(trans->entries[level].snps.vswing),
+ MB_WRITE_COMMITTED);
+ intel_cx0_rmw(i915, encoder->port, BIT(lane), PHY_CX0_VDROVRD_CTL(lane, tx, 2),
+ C10_PHY_OVRD_LEVEL_MASK,
+ C10_PHY_OVRD_LEVEL(trans->entries[level].snps.post_cursor),
+ MB_WRITE_COMMITTED);
+ }
+
+ /* Write Override enables in 0xD71 */
+ intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_OVRD,
+ 0, PHY_C10_VDR_OVRD_TX1 | PHY_C10_VDR_OVRD_TX2,
+ MB_WRITE_COMMITTED);
+
+ if (intel_is_c10phy(i915, phy))
+ intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+ 0, C10_VDR_CTRL_UPDATE_CFG, MB_WRITE_COMMITTED);
+
+ intel_cx0_phy_transaction_end(encoder, wakeref);
+}
+
+/*
+ * Basic DP link rates with a 38.4 MHz reference clock.
+ * Note: the tables below assume SSC. In the non-SSC case,
+ * registers 0xC04 to 0xC08 (pll[4] to pll[8]) will be
+ * programmed to 0.
+ */
+
+static const struct intel_c10pll_state mtl_c10_dp_rbr = {
+ .clock = 162000,
+ .tx = 0x10,
+ .cmn = 0x21,
+ .pll[0] = 0xB4,
+ .pll[1] = 0,
+ .pll[2] = 0x30,
+ .pll[3] = 0x1,
+ .pll[4] = 0x26,
+ .pll[5] = 0x0C,
+ .pll[6] = 0x98,
+ .pll[7] = 0x46,
+ .pll[8] = 0x1,
+ .pll[9] = 0x1,
+ .pll[10] = 0,
+ .pll[11] = 0,
+ .pll[12] = 0xC0,
+ .pll[13] = 0,
+ .pll[14] = 0,
+ .pll[15] = 0x2,
+ .pll[16] = 0x84,
+ .pll[17] = 0x4F,
+ .pll[18] = 0xE5,
+ .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_edp_r216 = {
+ .clock = 216000,
+ .tx = 0x10,
+ .cmn = 0x21,
+ .pll[0] = 0x4,
+ .pll[1] = 0,
+ .pll[2] = 0xA2,
+ .pll[3] = 0x1,
+ .pll[4] = 0x33,
+ .pll[5] = 0x10,
+ .pll[6] = 0x75,
+ .pll[7] = 0xB3,
+ .pll[8] = 0x1,
+ .pll[9] = 0x1,
+ .pll[10] = 0,
+ .pll[11] = 0,
+ .pll[12] = 0,
+ .pll[13] = 0,
+ .pll[14] = 0,
+ .pll[15] = 0x2,
+ .pll[16] = 0x85,
+ .pll[17] = 0x0F,
+ .pll[18] = 0xE6,
+ .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_edp_r243 = {
+ .clock = 243000,
+ .tx = 0x10,
+ .cmn = 0x21,
+ .pll[0] = 0x34,
+ .pll[1] = 0,
+ .pll[2] = 0xDA,
+ .pll[3] = 0x1,
+ .pll[4] = 0x39,
+ .pll[5] = 0x12,
+ .pll[6] = 0xE3,
+ .pll[7] = 0xE9,
+ .pll[8] = 0x1,
+ .pll[9] = 0x1,
+ .pll[10] = 0,
+ .pll[11] = 0,
+ .pll[12] = 0x20,
+ .pll[13] = 0,
+ .pll[14] = 0,
+ .pll[15] = 0x2,
+ .pll[16] = 0x85,
+ .pll[17] = 0x8F,
+ .pll[18] = 0xE6,
+ .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_dp_hbr1 = {
+ .clock = 270000,
+ .tx = 0x10,
+ .cmn = 0x21,
+ .pll[0] = 0xF4,
+ .pll[1] = 0,
+ .pll[2] = 0xF8,
+ .pll[3] = 0x0,
+ .pll[4] = 0x20,
+ .pll[5] = 0x0A,
+ .pll[6] = 0x29,
+ .pll[7] = 0x10,
+ .pll[8] = 0x1, /* Verify */
+ .pll[9] = 0x1,
+ .pll[10] = 0,
+ .pll[11] = 0,
+ .pll[12] = 0xA0,
+ .pll[13] = 0,
+ .pll[14] = 0,
+ .pll[15] = 0x1,
+ .pll[16] = 0x84,
+ .pll[17] = 0x4F,
+ .pll[18] = 0xE5,
+ .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_edp_r324 = {
+ .clock = 324000,
+ .tx = 0x10,
+ .cmn = 0x21,
+ .pll[0] = 0xB4,
+ .pll[1] = 0,
+ .pll[2] = 0x30,
+ .pll[3] = 0x1,
+ .pll[4] = 0x26,
+ .pll[5] = 0x0C,
+ .pll[6] = 0x98,
+ .pll[7] = 0x46,
+ .pll[8] = 0x1,
+ .pll[9] = 0x1,
+ .pll[10] = 0,
+ .pll[11] = 0,
+ .pll[12] = 0xC0,
+ .pll[13] = 0,
+ .pll[14] = 0,
+ .pll[15] = 0x1,
+ .pll[16] = 0x85,
+ .pll[17] = 0x4F,
+ .pll[18] = 0xE6,
+ .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_edp_r432 = {
+ .clock = 432000,
+ .tx = 0x10,
+ .cmn = 0x21,
+ .pll[0] = 0x4,
+ .pll[1] = 0,
+ .pll[2] = 0xA2,
+ .pll[3] = 0x1,
+ .pll[4] = 0x33,
+ .pll[5] = 0x10,
+ .pll[6] = 0x75,
+ .pll[7] = 0xB3,
+ .pll[8] = 0x1,
+ .pll[9] = 0x1,
+ .pll[10] = 0,
+ .pll[11] = 0,
+ .pll[12] = 0,
+ .pll[13] = 0,
+ .pll[14] = 0,
+ .pll[15] = 0x1,
+ .pll[16] = 0x85,
+ .pll[17] = 0x0F,
+ .pll[18] = 0xE6,
+ .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_dp_hbr2 = {
+ .clock = 540000,
+ .tx = 0x10,
+ .cmn = 0x21,
+ .pll[0] = 0xF4,
+ .pll[1] = 0,
+ .pll[2] = 0xF8,
+ .pll[3] = 0,
+ .pll[4] = 0x20,
+ .pll[5] = 0x0A,
+ .pll[6] = 0x29,
+ .pll[7] = 0x10,
+ .pll[8] = 0x1,
+ .pll[9] = 0x1,
+ .pll[10] = 0,
+ .pll[11] = 0,
+ .pll[12] = 0xA0,
+ .pll[13] = 0,
+ .pll[14] = 0,
+ .pll[15] = 0,
+ .pll[16] = 0x84,
+ .pll[17] = 0x4F,
+ .pll[18] = 0xE5,
+ .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_edp_r675 = {
+ .clock = 675000,
+ .tx = 0x10,
+ .cmn = 0x21,
+ .pll[0] = 0xB4,
+ .pll[1] = 0,
+ .pll[2] = 0x3E,
+ .pll[3] = 0x1,
+ .pll[4] = 0xA8,
+ .pll[5] = 0x0C,
+ .pll[6] = 0x33,
+ .pll[7] = 0x54,
+ .pll[8] = 0x1,
+ .pll[9] = 0x1,
+ .pll[10] = 0,
+ .pll[11] = 0,
+ .pll[12] = 0xC8,
+ .pll[13] = 0,
+ .pll[14] = 0,
+ .pll[15] = 0,
+ .pll[16] = 0x85,
+ .pll[17] = 0x8F,
+ .pll[18] = 0xE6,
+ .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_dp_hbr3 = {
+ .clock = 810000,
+ .tx = 0x10,
+ .cmn = 0x21,
+ .pll[0] = 0x34,
+ .pll[1] = 0,
+ .pll[2] = 0x84,
+ .pll[3] = 0x1,
+ .pll[4] = 0x30,
+ .pll[5] = 0x0F,
+ .pll[6] = 0x3D,
+ .pll[7] = 0x98,
+ .pll[8] = 0x1,
+ .pll[9] = 0x1,
+ .pll[10] = 0,
+ .pll[11] = 0,
+ .pll[12] = 0xF0,
+ .pll[13] = 0,
+ .pll[14] = 0,
+ .pll[15] = 0,
+ .pll[16] = 0x84,
+ .pll[17] = 0x0F,
+ .pll[18] = 0xE5,
+ .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state * const mtl_c10_dp_tables[] = {
+ &mtl_c10_dp_rbr,
+ &mtl_c10_dp_hbr1,
+ &mtl_c10_dp_hbr2,
+ &mtl_c10_dp_hbr3,
+ NULL,
+};
+
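+/*
+ * eDP reuses the common DP entries, with the eDP-only intermediate rates
+ * interleaved in ascending clock order.
+ */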
+static const struct intel_c10pll_state * const mtl_c10_edp_tables[] = {
+ &mtl_c10_dp_rbr,
+ &mtl_c10_edp_r216,
+ &mtl_c10_edp_r243,
+ &mtl_c10_dp_hbr1,
+ &mtl_c10_edp_r324,
+ &mtl_c10_edp_r432,
+ &mtl_c10_dp_hbr2,
+ &mtl_c10_edp_r675,
+ &mtl_c10_dp_hbr3,
+ NULL,
+};
+
+/* C20 basic DP 1.4 tables */
+static const struct intel_c20pll_state mtl_c20_dp_rbr = {
+ .link_bit_rate = 162000,
+ .clock = 162000,
+ .tx = { 0xbe88, /* tx cfg0 */
+ 0x5800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = {0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mpllb = { 0x50a8, /* mpllb cfg0 */
+ 0x2120, /* mpllb cfg1 */
+ 0xcd9a, /* mpllb cfg2 */
+ 0xbfc1, /* mpllb cfg3 */
+ 0x5ab8, /* mpllb cfg4 */
+ 0x4c34, /* mpllb cfg5 */
+ 0x2000, /* mpllb cfg6 */
+ 0x0001, /* mpllb cfg7 */
+ 0x6000, /* mpllb cfg8 */
+ 0x0000, /* mpllb cfg9 */
+ 0x0000, /* mpllb cfg10 */
+ },
+};
+
+static const struct intel_c20pll_state mtl_c20_dp_hbr1 = {
+ .link_bit_rate = 270000,
+ .clock = 270000,
+ .tx = { 0xbe88, /* tx cfg0 */
+ 0x4800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = {0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mpllb = { 0x308c, /* mpllb cfg0 */
+ 0x2110, /* mpllb cfg1 */
+ 0xcc9c, /* mpllb cfg2 */
+ 0xbfc1, /* mpllb cfg3 */
+ 0x4b9a, /* mpllb cfg4 */
+ 0x3f81, /* mpllb cfg5 */
+ 0x2000, /* mpllb cfg6 */
+ 0x0001, /* mpllb cfg7 */
+ 0x5000, /* mpllb cfg8 */
+ 0x0000, /* mpllb cfg9 */
+ 0x0000, /* mpllb cfg10 */
+ },
+};
+
+static const struct intel_c20pll_state mtl_c20_dp_hbr2 = {
+ .link_bit_rate = 540000,
+ .clock = 540000,
+ .tx = { 0xbe88, /* tx cfg0 */
+ 0x4800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = {0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mpllb = { 0x108c, /* mpllb cfg0 */
+ 0x2108, /* mpllb cfg1 */
+ 0xcc9c, /* mpllb cfg2 */
+ 0xbfc1, /* mpllb cfg3 */
+ 0x4b9a, /* mpllb cfg4 */
+ 0x3f81, /* mpllb cfg5 */
+ 0x2000, /* mpllb cfg6 */
+ 0x0001, /* mpllb cfg7 */
+ 0x5000, /* mpllb cfg8 */
+ 0x0000, /* mpllb cfg9 */
+ 0x0000, /* mpllb cfg10 */
+ },
+};
+
+static const struct intel_c20pll_state mtl_c20_dp_hbr3 = {
+ .link_bit_rate = 810000,
+ .clock = 810000,
+ .tx = { 0xbe88, /* tx cfg0 */
+ 0x4800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = {0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mpllb = { 0x10d2, /* mpllb cfg0 */
+ 0x2108, /* mpllb cfg1 */
+ 0x8d98, /* mpllb cfg2 */
+ 0xbfc1, /* mpllb cfg3 */
+ 0x7166, /* mpllb cfg4 */
+ 0x5f42, /* mpllb cfg5 */
+ 0x2000, /* mpllb cfg6 */
+ 0x0001, /* mpllb cfg7 */
+ 0x7800, /* mpllb cfg8 */
+ 0x0000, /* mpllb cfg9 */
+ 0x0000, /* mpllb cfg10 */
+ },
+};
+
+/* C20 basic DP 2.0 tables */
+static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = {
+ .link_bit_rate = 1000000, /* 10 Gbps */
+ .clock = 312500,
+ .tx = { 0xbe21, /* tx cfg0 */
+ 0x4800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = {0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mplla = { 0x3104, /* mplla cfg0 */
+ 0xd105, /* mplla cfg1 */
+ 0xc025, /* mplla cfg2 */
+ 0xc025, /* mplla cfg3 */
+ 0x8c00, /* mplla cfg4 */
+ 0x759a, /* mplla cfg5 */
+ 0x4000, /* mplla cfg6 */
+ 0x0003, /* mplla cfg7 */
+ 0x3555, /* mplla cfg8 */
+ 0x0001, /* mplla cfg9 */
+ },
+};
+
+static const struct intel_c20pll_state mtl_c20_dp_uhbr13_5 = {
+ .link_bit_rate = 1350000, /* 13.5 Gbps */
+ .clock = 421875,
+ .tx = { 0xbea0, /* tx cfg0 */
+ 0x4800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = {0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mpllb = { 0x015f, /* mpllb cfg0 */
+ 0x2205, /* mpllb cfg1 */
+ 0x1b17, /* mpllb cfg2 */
+ 0xffc1, /* mpllb cfg3 */
+ 0xe100, /* mpllb cfg4 */
+ 0xbd00, /* mpllb cfg5 */
+ 0x2000, /* mpllb cfg6 */
+ 0x0001, /* mpllb cfg7 */
+ 0x4800, /* mpllb cfg8 */
+ 0x0000, /* mpllb cfg9 */
+ 0x0000, /* mpllb cfg10 */
+ },
+};
+
+static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = {
+ .link_bit_rate = 2000000, /* 20 Gbps */
+ .clock = 625000,
+ .tx = { 0xbe20, /* tx cfg0 */
+ 0x4800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = {0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mplla = { 0x3104, /* mplla cfg0 */
+ 0xd105, /* mplla cfg1 */
+ 0xc025, /* mplla cfg2 */
+ 0xc025, /* mplla cfg3 */
+ 0xa6ab, /* mplla cfg4 */
+ 0x8c00, /* mplla cfg5 */
+ 0x4000, /* mplla cfg6 */
+ 0x0003, /* mplla cfg7 */
+ 0x3555, /* mplla cfg8 */
+ 0x0001, /* mplla cfg9 */
+ },
+};
+
+static const struct intel_c20pll_state * const mtl_c20_dp_tables[] = {
+ &mtl_c20_dp_rbr,
+ &mtl_c20_dp_hbr1,
+ &mtl_c20_dp_hbr2,
+ &mtl_c20_dp_hbr3,
+ &mtl_c20_dp_uhbr10,
+ &mtl_c20_dp_uhbr13_5,
+ &mtl_c20_dp_uhbr20,
+ NULL,
+};
+
+/*
+ * HDMI link rates with a 38.4 MHz reference clock.
+ */
+
+static const struct intel_c10pll_state mtl_c10_hdmi_25_2 = {
+ .clock = 25200,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x4,
+ .pll[1] = 0,
+ .pll[2] = 0xB2,
+ .pll[3] = 0,
+ .pll[4] = 0,
+ .pll[5] = 0,
+ .pll[6] = 0,
+ .pll[7] = 0,
+ .pll[8] = 0x20,
+ .pll[9] = 0x1,
+ .pll[10] = 0,
+ .pll[11] = 0,
+ .pll[12] = 0,
+ .pll[13] = 0,
+ .pll[14] = 0,
+ .pll[15] = 0xD,
+ .pll[16] = 0x6,
+ .pll[17] = 0x8F,
+ .pll[18] = 0x84,
+ .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_27_0 = {
+ .clock = 27000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x34,
+ .pll[1] = 0,
+ .pll[2] = 0xC0,
+ .pll[3] = 0,
+ .pll[4] = 0,
+ .pll[5] = 0,
+ .pll[6] = 0,
+ .pll[7] = 0,
+ .pll[8] = 0x20,
+ .pll[9] = 0x1,
+ .pll[10] = 0,
+ .pll[11] = 0,
+ .pll[12] = 0x80,
+ .pll[13] = 0,
+ .pll[14] = 0,
+ .pll[15] = 0xD,
+ .pll[16] = 0x6,
+ .pll[17] = 0xCF,
+ .pll[18] = 0x84,
+ .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_74_25 = {
+ .clock = 74250,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4,
+ .pll[1] = 0,
+ .pll[2] = 0x7A,
+ .pll[3] = 0,
+ .pll[4] = 0,
+ .pll[5] = 0,
+ .pll[6] = 0,
+ .pll[7] = 0,
+ .pll[8] = 0x20,
+ .pll[9] = 0x1,
+ .pll[10] = 0,
+ .pll[11] = 0,
+ .pll[12] = 0x58,
+ .pll[13] = 0,
+ .pll[14] = 0,
+ .pll[15] = 0xB,
+ .pll[16] = 0x6,
+ .pll[17] = 0xF,
+ .pll[18] = 0x85,
+ .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_148_5 = {
+ .clock = 148500,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4,
+ .pll[1] = 0,
+ .pll[2] = 0x7A,
+ .pll[3] = 0,
+ .pll[4] = 0,
+ .pll[5] = 0,
+ .pll[6] = 0,
+ .pll[7] = 0,
+ .pll[8] = 0x20,
+ .pll[9] = 0x1,
+ .pll[10] = 0,
+ .pll[11] = 0,
+ .pll[12] = 0x58,
+ .pll[13] = 0,
+ .pll[14] = 0,
+ .pll[15] = 0xA,
+ .pll[16] = 0x6,
+ .pll[17] = 0xF,
+ .pll[18] = 0x85,
+ .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_594 = {
+ .clock = 594000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4,
+ .pll[1] = 0,
+ .pll[2] = 0x7A,
+ .pll[3] = 0,
+ .pll[4] = 0,
+ .pll[5] = 0,
+ .pll[6] = 0,
+ .pll[7] = 0,
+ .pll[8] = 0x20,
+ .pll[9] = 0x1,
+ .pll[10] = 0,
+ .pll[11] = 0,
+ .pll[12] = 0x58,
+ .pll[13] = 0,
+ .pll[14] = 0,
+ .pll[15] = 0x8,
+ .pll[16] = 0x6,
+ .pll[17] = 0xF,
+ .pll[18] = 0x85,
+ .pll[19] = 0x23,
+};
+
+/* Precomputed C10 HDMI PLL tables */
+static const struct intel_c10pll_state mtl_c10_hdmi_27027 = {
+ .clock = 27027,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xC0, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0xCC, .pll[12] = 0x9C, .pll[13] = 0xCB, .pll[14] = 0xCC,
+ .pll[15] = 0x0D, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_28320 = {
+ .clock = 28320,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x04, .pll[1] = 0x00, .pll[2] = 0xCC, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x00, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x0D, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_30240 = {
+ .clock = 30240,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x04, .pll[1] = 0x00, .pll[2] = 0xDC, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x00, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x0D, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_31500 = {
+ .clock = 31500,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x62, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xA0, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x0C, .pll[16] = 0x09, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_36000 = {
+ .clock = 36000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xC4, .pll[1] = 0x00, .pll[2] = 0x76, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x00, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_40000 = {
+ .clock = 40000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x86, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x55, .pll[13] = 0x55, .pll[14] = 0x55,
+ .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_49500 = {
+ .clock = 49500,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x20, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_50000 = {
+ .clock = 50000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xB0, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x2A, .pll[13] = 0xA9, .pll[14] = 0xAA,
+ .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_57284 = {
+ .clock = 57284,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xCE, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x77, .pll[12] = 0x57, .pll[13] = 0x77, .pll[14] = 0x77,
+ .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_58000 = {
+ .clock = 58000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD0, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xD5, .pll[13] = 0x55, .pll[14] = 0x55,
+ .pll[15] = 0x0C, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_65000 = {
+ .clock = 65000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x66, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xB5, .pll[13] = 0x55, .pll[14] = 0x55,
+ .pll[15] = 0x0B, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_71000 = {
+ .clock = 71000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x72, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xF5, .pll[13] = 0x55, .pll[14] = 0x55,
+ .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_74176 = {
+ .clock = 74176,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x44, .pll[12] = 0x44, .pll[13] = 0x44, .pll[14] = 0x44,
+ .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_75000 = {
+ .clock = 75000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7C, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x20, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_78750 = {
+ .clock = 78750,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x84, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x08, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_85500 = {
+ .clock = 85500,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x92, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x10, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_88750 = {
+ .clock = 88750,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0x98, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x72, .pll[13] = 0xA9, .pll[14] = 0xAA,
+ .pll[15] = 0x0B, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_106500 = {
+ .clock = 106500,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xBC, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xF0, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_108000 = {
+ .clock = 108000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xC0, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x80, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_115500 = {
+ .clock = 115500,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD0, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x50, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_119000 = {
+ .clock = 119000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD6, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xF5, .pll[13] = 0x55, .pll[14] = 0x55,
+ .pll[15] = 0x0B, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_135000 = {
+ .clock = 135000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x6C, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x50, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x0A, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_138500 = {
+ .clock = 138500,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x70, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x22, .pll[13] = 0xA9, .pll[14] = 0xAA,
+ .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_147160 = {
+ .clock = 147160,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x78, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0xA5, .pll[13] = 0x55, .pll[14] = 0x55,
+ .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_148352 = {
+ .clock = 148352,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x44, .pll[12] = 0x44, .pll[13] = 0x44, .pll[14] = 0x44,
+ .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_154000 = {
+ .clock = 154000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x80, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x35, .pll[13] = 0x55, .pll[14] = 0x55,
+ .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_162000 = {
+ .clock = 162000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x88, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x60, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_167000 = {
+ .clock = 167000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x8C, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0xFA, .pll[13] = 0xA9, .pll[14] = 0xAA,
+ .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_197802 = {
+ .clock = 197802,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x99, .pll[12] = 0x05, .pll[13] = 0x98, .pll[14] = 0x99,
+ .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_198000 = {
+ .clock = 198000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x20, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_209800 = {
+ .clock = 209800,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xBA, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x45, .pll[13] = 0x55, .pll[14] = 0x55,
+ .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_241500 = {
+ .clock = 241500,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xDA, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xC8, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x0A, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_262750 = {
+ .clock = 262750,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x68, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x6C, .pll[13] = 0xA9, .pll[14] = 0xAA,
+ .pll[15] = 0x09, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_268500 = {
+ .clock = 268500,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x6A, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0xEC, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x09, .pll[16] = 0x09, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_296703 = {
+ .clock = 296703,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x33, .pll[12] = 0x44, .pll[13] = 0x33, .pll[14] = 0x33,
+ .pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_297000 = {
+ .clock = 297000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x00, .pll[12] = 0x58, .pll[13] = 0x00, .pll[14] = 0x00,
+ .pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_319750 = {
+ .clock = 319750,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x86, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0xAA, .pll[12] = 0x44, .pll[13] = 0xA9, .pll[14] = 0xAA,
+ .pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_497750 = {
+ .clock = 497750,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xE2, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x9F, .pll[13] = 0x55, .pll[14] = 0x55,
+ .pll[15] = 0x09, .pll[16] = 0x08, .pll[17] = 0xCF, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_592000 = {
+ .clock = 592000,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x55, .pll[12] = 0x15, .pll[13] = 0x55, .pll[14] = 0x55,
+ .pll[15] = 0x08, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
+static const struct intel_c10pll_state mtl_c10_hdmi_593407 = {
+ .clock = 593407,
+ .tx = 0x10,
+ .cmn = 0x1,
+ .pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
+ .pll[5] = 0x00, .pll[6] = 0x00, .pll[7] = 0x00, .pll[8] = 0x20, .pll[9] = 0xFF,
+ .pll[10] = 0xFF, .pll[11] = 0x3B, .pll[12] = 0x44, .pll[13] = 0xBA, .pll[14] = 0xBB,
+ .pll[15] = 0x08, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
+};
+
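+/*
+ * HDMI TMDS clock lookup table, in ascending clock order. The entries
+ * marked "Consolidated Table" are the shared base rates defined further
+ * above; the rest are precomputed for specific modes.
+ */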
+static const struct intel_c10pll_state * const mtl_c10_hdmi_tables[] = {
+ &mtl_c10_hdmi_25_2, /* Consolidated Table */
+ &mtl_c10_hdmi_27_0, /* Consolidated Table */
+ &mtl_c10_hdmi_27027,
+ &mtl_c10_hdmi_28320,
+ &mtl_c10_hdmi_30240,
+ &mtl_c10_hdmi_31500,
+ &mtl_c10_hdmi_36000,
+ &mtl_c10_hdmi_40000,
+ &mtl_c10_hdmi_49500,
+ &mtl_c10_hdmi_50000,
+ &mtl_c10_hdmi_57284,
+ &mtl_c10_hdmi_58000,
+ &mtl_c10_hdmi_65000,
+ &mtl_c10_hdmi_71000,
+ &mtl_c10_hdmi_74176,
+ &mtl_c10_hdmi_74_25, /* Consolidated Table */
+ &mtl_c10_hdmi_75000,
+ &mtl_c10_hdmi_78750,
+ &mtl_c10_hdmi_85500,
+ &mtl_c10_hdmi_88750,
+ &mtl_c10_hdmi_106500,
+ &mtl_c10_hdmi_108000,
+ &mtl_c10_hdmi_115500,
+ &mtl_c10_hdmi_119000,
+ &mtl_c10_hdmi_135000,
+ &mtl_c10_hdmi_138500,
+ &mtl_c10_hdmi_147160,
+ &mtl_c10_hdmi_148352,
+ &mtl_c10_hdmi_148_5, /* Consolidated Table */
+ &mtl_c10_hdmi_154000,
+ &mtl_c10_hdmi_162000,
+ &mtl_c10_hdmi_167000,
+ &mtl_c10_hdmi_197802,
+ &mtl_c10_hdmi_198000,
+ &mtl_c10_hdmi_209800,
+ &mtl_c10_hdmi_241500,
+ &mtl_c10_hdmi_262750,
+ &mtl_c10_hdmi_268500,
+ &mtl_c10_hdmi_296703,
+ &mtl_c10_hdmi_297000,
+ &mtl_c10_hdmi_319750,
+ &mtl_c10_hdmi_497750,
+ &mtl_c10_hdmi_592000,
+ &mtl_c10_hdmi_593407,
+ &mtl_c10_hdmi_594, /* Consolidated Table */
+ NULL,
+};
+
+static const struct intel_c20pll_state mtl_c20_hdmi_25_175 = {
+ .link_bit_rate = 25175,
+ .clock = 25175,
+ .tx = { 0xbe88, /* tx cfg0 */
+ 0x9800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = { 0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mpllb = { 0xa0d2, /* mpllb cfg0 */
+ 0x7d80, /* mpllb cfg1 */
+ 0x0906, /* mpllb cfg2 */
+ 0xbe40, /* mpllb cfg3 */
+ 0x0000, /* mpllb cfg4 */
+ 0x0000, /* mpllb cfg5 */
+ 0x0200, /* mpllb cfg6 */
+ 0x0001, /* mpllb cfg7 */
+ 0x0000, /* mpllb cfg8 */
+ 0x0000, /* mpllb cfg9 */
+ 0x0001, /* mpllb cfg10 */
+ },
+};
+
+static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = {
+ .link_bit_rate = 27000,
+ .clock = 27000,
+ .tx = { 0xbe88, /* tx cfg0 */
+ 0x9800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = { 0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mpllb = { 0xa0e0, /* mpllb cfg0 */
+ 0x7d80, /* mpllb cfg1 */
+ 0x0906, /* mpllb cfg2 */
+ 0xbe40, /* mpllb cfg3 */
+ 0x0000, /* mpllb cfg4 */
+ 0x0000, /* mpllb cfg5 */
+ 0x2200, /* mpllb cfg6 */
+ 0x0001, /* mpllb cfg7 */
+ 0x8000, /* mpllb cfg8 */
+ 0x0000, /* mpllb cfg9 */
+ 0x0001, /* mpllb cfg10 */
+ },
+};
+
+static const struct intel_c20pll_state mtl_c20_hdmi_74_25 = {
+ .link_bit_rate = 74250,
+ .clock = 74250,
+ .tx = { 0xbe88, /* tx cfg0 */
+ 0x9800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = { 0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mpllb = { 0x609a, /* mpllb cfg0 */
+ 0x7d40, /* mpllb cfg1 */
+ 0xca06, /* mpllb cfg2 */
+ 0xbe40, /* mpllb cfg3 */
+ 0x0000, /* mpllb cfg4 */
+ 0x0000, /* mpllb cfg5 */
+ 0x2200, /* mpllb cfg6 */
+ 0x0001, /* mpllb cfg7 */
+ 0x5800, /* mpllb cfg8 */
+ 0x0000, /* mpllb cfg9 */
+ 0x0001, /* mpllb cfg10 */
+ },
+};
+
+static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = {
+ .link_bit_rate = 148500,
+ .clock = 148500,
+ .tx = { 0xbe88, /* tx cfg0 */
+ 0x9800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = { 0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mpllb = { 0x409a, /* mpllb cfg0 */
+ 0x7d20, /* mpllb cfg1 */
+ 0xca06, /* mpllb cfg2 */
+ 0xbe40, /* mpllb cfg3 */
+ 0x0000, /* mpllb cfg4 */
+ 0x0000, /* mpllb cfg5 */
+ 0x2200, /* mpllb cfg6 */
+ 0x0001, /* mpllb cfg7 */
+ 0x5800, /* mpllb cfg8 */
+ 0x0000, /* mpllb cfg9 */
+ 0x0001, /* mpllb cfg10 */
+ },
+};
+
+static const struct intel_c20pll_state mtl_c20_hdmi_594 = {
+ .link_bit_rate = 594000,
+ .clock = 594000,
+ .tx = { 0xbe88, /* tx cfg0 */
+ 0x9800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = { 0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mpllb = { 0x009a, /* mpllb cfg0 */
+ 0x7d08, /* mpllb cfg1 */
+ 0xca06, /* mpllb cfg2 */
+ 0xbe40, /* mpllb cfg3 */
+ 0x0000, /* mpllb cfg4 */
+ 0x0000, /* mpllb cfg5 */
+ 0x2200, /* mpllb cfg6 */
+ 0x0001, /* mpllb cfg7 */
+ 0x5800, /* mpllb cfg8 */
+ 0x0000, /* mpllb cfg9 */
+ 0x0001, /* mpllb cfg10 */
+ },
+};
+
+static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
+ .link_bit_rate = 3000000,
+ .clock = 166670,
+ .tx = { 0xbe98, /* tx cfg0 */
+ 0x9800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = { 0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mpllb = { 0x209c, /* mpllb cfg0 */
+ 0x7d10, /* mpllb cfg1 */
+ 0xca06, /* mpllb cfg2 */
+ 0xbe40, /* mpllb cfg3 */
+ 0x0000, /* mpllb cfg4 */
+ 0x0000, /* mpllb cfg5 */
+ 0x2200, /* mpllb cfg6 */
+ 0x0001, /* mpllb cfg7 */
+ 0x2000, /* mpllb cfg8 */
+ 0x0000, /* mpllb cfg9 */
+ 0x0004, /* mpllb cfg10 */
+ },
+};
+
+static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
+ .link_bit_rate = 6000000,
+ .clock = 333330,
+ .tx = { 0xbe98, /* tx cfg0 */
+ 0x9800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = { 0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mpllb = { 0x009c, /* mpllb cfg0 */
+ 0x7d08, /* mpllb cfg1 */
+ 0xca06, /* mpllb cfg2 */
+ 0xbe40, /* mpllb cfg3 */
+ 0x0000, /* mpllb cfg4 */
+ 0x0000, /* mpllb cfg5 */
+ 0x2200, /* mpllb cfg6 */
+ 0x0001, /* mpllb cfg7 */
+ 0x2000, /* mpllb cfg8 */
+ 0x0000, /* mpllb cfg9 */
+ 0x0004, /* mpllb cfg10 */
+ },
+};
+
+static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
+ .link_bit_rate = 8000000,
+ .clock = 444440,
+ .tx = { 0xbe98, /* tx cfg0 */
+ 0x9800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = { 0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mpllb = { 0x00d0, /* mpllb cfg0 */
+ 0x7d08, /* mpllb cfg1 */
+ 0x4a06, /* mpllb cfg2 */
+ 0xbe40, /* mpllb cfg3 */
+ 0x0000, /* mpllb cfg4 */
+ 0x0000, /* mpllb cfg5 */
+ 0x2200, /* mpllb cfg6 */
+ 0x0003, /* mpllb cfg7 */
+ 0x2aaa, /* mpllb cfg8 */
+ 0x0002, /* mpllb cfg9 */
+ 0x0004, /* mpllb cfg10 */
+ },
+};
+
+static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
+ .link_bit_rate = 10000000,
+ .clock = 555560,
+ .tx = { 0xbe98, /* tx cfg0 */
+ 0x9800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = { 0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mpllb = { 0x1104, /* mpllb cfg0 */
+ 0x7d08, /* mpllb cfg1 */
+ 0x0a06, /* mpllb cfg2 */
+ 0xbe40, /* mpllb cfg3 */
+ 0x0000, /* mpllb cfg4 */
+ 0x0000, /* mpllb cfg5 */
+ 0x2200, /* mpllb cfg6 */
+ 0x0003, /* mpllb cfg7 */
+ 0x3555, /* mpllb cfg8 */
+ 0x0001, /* mpllb cfg9 */
+ 0x0004, /* mpllb cfg10 */
+ },
+};
+
+static const struct intel_c20pll_state mtl_c20_hdmi_1200 = {
+ .link_bit_rate = 12000000,
+ .clock = 666670,
+ .tx = { 0xbe98, /* tx cfg0 */
+ 0x9800, /* tx cfg1 */
+ 0x0000, /* tx cfg2 */
+ },
+ .cmn = { 0x0500, /* cmn cfg0 */
+ 0x0005, /* cmn cfg1 */
+ 0x0000, /* cmn cfg2 */
+ 0x0000, /* cmn cfg3 */
+ },
+ .mpllb = { 0x0138, /* mpllb cfg0 */
+ 0x7d08, /* mpllb cfg1 */
+ 0x5486, /* mpllb cfg2 */
+ 0xfe40, /* mpllb cfg3 */
+ 0x0000, /* mpllb cfg4 */
+ 0x0000, /* mpllb cfg5 */
+ 0x2200, /* mpllb cfg6 */
+ 0x0001, /* mpllb cfg7 */
+ 0x4000, /* mpllb cfg8 */
+ 0x0000, /* mpllb cfg9 */
+ 0x0004, /* mpllb cfg10 */
+ },
+};
+
+static const struct intel_c20pll_state * const mtl_c20_hdmi_tables[] = {
+ &mtl_c20_hdmi_25_175,
+ &mtl_c20_hdmi_27_0,
+ &mtl_c20_hdmi_74_25,
+ &mtl_c20_hdmi_148_5,
+ &mtl_c20_hdmi_594,
+ &mtl_c20_hdmi_300,
+ &mtl_c20_hdmi_600,
+ &mtl_c20_hdmi_800,
+ &mtl_c20_hdmi_1000,
+ &mtl_c20_hdmi_1200,
+ NULL,
+};
+
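+/*
+ * C10 HDMI support is limited to the precomputed rates above: a clock
+ * without an exact table match is rejected rather than calculated at
+ * runtime.
+ */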
+static int intel_c10_phy_check_hdmi_link_rate(int clock)
+{
+ const struct intel_c10pll_state * const *tables = mtl_c10_hdmi_tables;
+ int i;
+
+ for (i = 0; tables[i]; i++) {
+ if (clock == tables[i]->clock)
+ return MODE_OK;
+ }
+
+ return MODE_CLOCK_RANGE;
+}
+
+static const struct intel_c10pll_state * const *
+intel_c10pll_tables_get(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return mtl_c10_edp_tables;
+ else
+ return mtl_c10_dp_tables;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ return mtl_c10_hdmi_tables;
+ }
+
+ MISSING_CASE(encoder->type);
+ return NULL;
+}
+
+static void intel_c10pll_update_pll(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_cx0pll_state *pll_state = &crtc_state->cx0pll_state;
+ int i;
+
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (intel_panel_use_ssc(i915)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ pll_state->ssc_enabled =
+ (intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5);
+ }
+ }
+
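+	/*
+	 * The PLL tables assume SSC; when SSC is not enabled, the SSC-related
+	 * registers pll[4]..pll[8] (0xC04..0xC08) are zeroed out below.
+	 */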
+ if (pll_state->ssc_enabled)
+ return;
+
+ drm_WARN_ON(&i915->drm, ARRAY_SIZE(pll_state->c10.pll) < 9);
+ for (i = 4; i < 9; i++)
+ pll_state->c10.pll[i] = 0;
+}
+
+static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ const struct intel_c10pll_state * const *tables;
+ int i;
+
+ tables = intel_c10pll_tables_get(crtc_state, encoder);
+ if (!tables)
+ return -EINVAL;
+
+ for (i = 0; tables[i]; i++) {
+ if (crtc_state->port_clock == tables[i]->clock) {
+ crtc_state->cx0pll_state.c10 = *tables[i];
+ intel_c10pll_update_pll(crtc_state, encoder);
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_c10pll_state *pll_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u8 lane = INTEL_CX0_LANE0;
+ intel_wakeref_t wakeref;
+ int i;
+
+ wakeref = intel_cx0_phy_transaction_begin(encoder);
+
+ /*
+ * According to the C10 VDR register programming sequence, MsgBus access
+ * must be enabled before the PHY's internal registers can be read.
+ */
+ intel_cx0_rmw(i915, encoder->port, lane, PHY_C10_VDR_CONTROL(1),
+ 0, C10_VDR_CTRL_MSGBUS_ACCESS,
+ MB_WRITE_COMMITTED);
+
+ for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
+ pll_state->pll[i] = intel_cx0_read(i915, encoder->port, lane,
+ PHY_C10_VDR_PLL(i));
+
+ pll_state->cmn = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_CMN(0));
+ pll_state->tx = intel_cx0_read(i915, encoder->port, lane, PHY_C10_VDR_TX(0));
+
+ intel_cx0_phy_transaction_end(encoder, wakeref);
+}
+
+static void intel_c10_pll_program(struct drm_i915_private *i915,
+ const struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ const struct intel_c10pll_state *pll_state = &crtc_state->cx0pll_state.c10;
+ int i;
+
+ intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+ 0, C10_VDR_CTRL_MSGBUS_ACCESS,
+ MB_WRITE_COMMITTED);
+
+ /* Custom width needs to be programmed to 0 for both PHY lanes */
+ intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
+ C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
+ MB_WRITE_COMMITTED);
+ intel_cx0_rmw(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+ 0, C10_VDR_CTRL_UPDATE_CFG,
+ MB_WRITE_COMMITTED);
+
+ /* Program the PLL values only for the master lane */
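+	/* Only every fourth PLL register write (pll[0], pll[4], ...) is committed. */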
+ for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
+ intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
+ pll_state->pll[i],
+ (i % 4) ? MB_WRITE_UNCOMMITTED : MB_WRITE_COMMITTED);
+
+ intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
+ intel_cx0_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
+
+ intel_cx0_rmw(i915, encoder->port, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
+ 0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG,
+ MB_WRITE_COMMITTED);
+}
+
+void intel_c10pll_dump_hw_state(struct drm_i915_private *i915,
+ const struct intel_c10pll_state *hw_state)
+{
+ bool fracen;
+ int i;
+ unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
+ unsigned int multiplier, tx_clk_div;
+
+ fracen = hw_state->pll[0] & C10_PLL0_FRACEN;
+ drm_dbg_kms(&i915->drm, "c10pll_hw_state: fracen: %s, ",
+ str_yes_no(fracen));
+
+ if (fracen) {
+ frac_quot = hw_state->pll[12] << 8 | hw_state->pll[11];
+ frac_rem = hw_state->pll[14] << 8 | hw_state->pll[13];
+ frac_den = hw_state->pll[10] << 8 | hw_state->pll[9];
+ drm_dbg_kms(&i915->drm, "quot: %u, rem: %u, den: %u,\n",
+ frac_quot, frac_rem, frac_den);
+ }
+
+ multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, hw_state->pll[3]) << 8 |
+ hw_state->pll[2]) / 2 + 16;
+ tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, hw_state->pll[15]);
+ drm_dbg_kms(&i915->drm,
+ "multiplier: %u, tx_clk_div: %u.\n", multiplier, tx_clk_div);
+
+ drm_dbg_kms(&i915->drm, "c10pll_rawhw_state:");
+ drm_dbg_kms(&i915->drm, "tx: 0x%x, cmn: 0x%x\n", hw_state->tx, hw_state->cmn);
+
+ BUILD_BUG_ON(ARRAY_SIZE(hw_state->pll) % 4);
+ for (i = 0; i < ARRAY_SIZE(hw_state->pll); i = i + 4)
+ drm_dbg_kms(&i915->drm, "pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x, pll[%d] = 0x%x\n",
+ i, hw_state->pll[i], i + 1, hw_state->pll[i + 1],
+ i + 2, hw_state->pll[i + 2], i + 3, hw_state->pll[i + 3]);
+}
+
+static int intel_c20_phy_check_hdmi_link_rate(int clock)
+{
+ const struct intel_c20pll_state * const *tables = mtl_c20_hdmi_tables;
+ int i;
+
+ for (i = 0; tables[i]; i++) {
+ if (clock == tables[i]->link_bit_rate)
+ return MODE_OK;
+ }
+
+ return MODE_CLOCK_RANGE;
+}
+
+int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock)
+{
+ struct intel_digital_port *dig_port = hdmi_to_dig_port(hdmi);
+ struct drm_i915_private *i915 = intel_hdmi_to_i915(hdmi);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+
+ if (intel_is_c10phy(i915, phy))
+ return intel_c10_phy_check_hdmi_link_rate(clock);
+ return intel_c20_phy_check_hdmi_link_rate(clock);
+}
+
+static const struct intel_c20pll_state * const *
+intel_c20_pll_tables_get(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ return mtl_c20_dp_tables;
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return mtl_c20_hdmi_tables;
+
+ MISSING_CASE(encoder->type);
+ return NULL;
+}
+
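+/*
+ * Note that C20 tables are keyed by .link_bit_rate, which matches
+ * port_clock; .clock holds the PHY symbol clock that the rest of the C20
+ * programming keys on. The two only differ for DP2.0 and HDMI FRL rates.
+ */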
+static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ const struct intel_c20pll_state * const *tables;
+ int i;
+
+ tables = intel_c20_pll_tables_get(crtc_state, encoder);
+ if (!tables)
+ return -EINVAL;
+
+ for (i = 0; tables[i]; i++) {
+ if (crtc_state->port_clock == tables[i]->link_bit_rate) {
+ crtc_state->cx0pll_state.c20 = *tables[i];
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_is_c10phy(i915, phy))
+ return intel_c10pll_calc_state(crtc_state, encoder);
+ return intel_c20pll_calc_state(crtc_state, encoder);
+}
+
+static bool intel_c20_use_mplla(u32 clock)
+{
+ /* The 10G and 20G DP2.0 rates use MPLLA; all other rates, including 13.5G, use MPLLB */
+ if (clock == 312500 || clock == 625000)
+ return true;
+
+ return false;
+}
+
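+/*
+ * The C20 PHY holds two SRAM configuration contexts, A and B. Readout
+ * fetches whichever context is currently selected, and picks the MPLLA or
+ * MPLLB register set based on the Tx configuration.
+ */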
+void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_c20pll_state *pll_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ bool cntx;
+ intel_wakeref_t wakeref;
+ int i;
+
+ wakeref = intel_cx0_phy_transaction_begin(encoder);
+
+ /* 1. Read current context selection */
+ cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & PHY_C20_CONTEXT_TOGGLE;
+
+ /* Read Tx configuration */
+ for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) {
+ if (cntx)
+ pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ PHY_C20_B_TX_CNTX_CFG(i));
+ else
+ pll_state->tx[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ PHY_C20_A_TX_CNTX_CFG(i));
+ }
+
+ /* Read common configuration */
+ for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) {
+ if (cntx)
+ pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ PHY_C20_B_CMN_CNTX_CFG(i));
+ else
+ pll_state->cmn[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ PHY_C20_A_CMN_CNTX_CFG(i));
+ }
+
+ if (pll_state->tx[0] & C20_PHY_USE_MPLLB) {
+ /* MPLLB configuration */
+ for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
+ if (cntx)
+ pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ PHY_C20_B_MPLLB_CNTX_CFG(i));
+ else
+ pll_state->mpllb[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ PHY_C20_A_MPLLB_CNTX_CFG(i));
+ }
+ } else {
+ /* MPLLA configuration */
+ for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
+ if (cntx)
+ pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ PHY_C20_B_MPLLA_CNTX_CFG(i));
+ else
+ pll_state->mplla[i] = intel_c20_sram_read(i915, encoder->port, INTEL_CX0_LANE0,
+ PHY_C20_A_MPLLA_CNTX_CFG(i));
+ }
+ }
+
+ intel_cx0_phy_transaction_end(encoder, wakeref);
+}
+
+void intel_c20pll_dump_hw_state(struct drm_i915_private *i915,
+ const struct intel_c20pll_state *hw_state)
+{
+ int i;
+
+ drm_dbg_kms(&i915->drm, "c20pll_hw_state:\n");
+ drm_dbg_kms(&i915->drm, "tx[0] = 0x%.4x, tx[1] = 0x%.4x, tx[2] = 0x%.4x\n",
+ hw_state->tx[0], hw_state->tx[1], hw_state->tx[2]);
+ drm_dbg_kms(&i915->drm, "cmn[0] = 0x%.4x, cmn[1] = 0x%.4x, cmn[2] = 0x%.4x, cmn[3] = 0x%.4x\n",
+ hw_state->cmn[0], hw_state->cmn[1], hw_state->cmn[2], hw_state->cmn[3]);
+
+ if (intel_c20_use_mplla(hw_state->clock)) {
+ for (i = 0; i < ARRAY_SIZE(hw_state->mplla); i++)
+ drm_dbg_kms(&i915->drm, "mplla[%d] = 0x%.4x\n", i, hw_state->mplla[i]);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(hw_state->mpllb); i++)
+ drm_dbg_kms(&i915->drm, "mpllb[%d] = 0x%.4x\n", i, hw_state->mpllb[i]);
+ }
+}
+
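+/* Map a DP link clock to the PHY's custom serdes rate encoding. */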
+static u8 intel_c20_get_dp_rate(u32 clock)
+{
+ switch (clock) {
+ case 162000: /* 1.62 Gbps DP1.4 */
+ return 0;
+ case 270000: /* 2.7 Gbps DP1.4 */
+ return 1;
+ case 540000: /* 5.4 Gbps DP1.4 */
+ return 2;
+ case 810000: /* 8.1 Gbps DP1.4 */
+ return 3;
+ case 216000: /* 2.16 Gbps eDP */
+ return 4;
+ case 243000: /* 2.43 Gbps eDP */
+ return 5;
+ case 324000: /* 3.24 Gbps eDP */
+ return 6;
+ case 432000: /* 4.32 Gbps eDP */
+ return 7;
+ case 312500: /* 10 Gbps DP2.0 */
+ return 8;
+ case 421875: /* 13.5 Gbps DP2.0 */
+ return 9;
+ case 625000: /* 20 Gbps DP2.0 */
+ return 10;
+ case 648000: /* 6.48 Gbps eDP */
+ return 11;
+ case 675000: /* 6.75 Gbps eDP */
+ return 12;
+ default:
+ MISSING_CASE(clock);
+ return 0;
+ }
+}
+
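+/*
+ * Map an HDMI clock to the PHY's HDMI rate encoding: all TMDS rates share
+ * encoding 0, while the FRL rates are grouped into the remaining encodings.
+ */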
+static u8 intel_c20_get_hdmi_rate(u32 clock)
+{
+ switch (clock) {
+ case 25175:
+ case 27000:
+ case 74250:
+ case 148500:
+ case 594000:
+ return 0;
+ case 166670: /* 3 Gbps */
+ case 333330: /* 6 Gbps */
+ case 666670: /* 12 Gbps */
+ return 1;
+ case 444440: /* 8 Gbps */
+ return 2;
+ case 555560: /* 10 Gbps */
+ return 3;
+ default:
+ MISSING_CASE(clock);
+ return 0;
+ }
+}
+
+static bool is_dp2(u32 clock)
+{
+ /* DP2.0 clock rates */
+ if (clock == 312500 || clock == 421875 || clock == 625000)
+ return true;
+
+ return false;
+}
+
+static bool is_hdmi_frl(u32 clock)
+{
+ switch (clock) {
+ case 166670: /* 3 Gbps */
+ case 333330: /* 6 Gbps */
+ case 444440: /* 8 Gbps */
+ case 555560: /* 10 Gbps */
+ case 666670: /* 12 Gbps */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool intel_c20_protocol_switch_valid(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+
+ /*
+ * The calibration banks should not be cleared for DPALT/USB4/TBT modes.
+ * TODO: optimize re-calibration in legacy mode.
+ */
+ return intel_tc_port_in_legacy_mode(intel_dig_port);
+}
+
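+/*
+ * Custom width selects the link-layer symbol encoding: 0 for the default
+ * mode, and presumably 1 for HDMI FRL (16b/18b) and 2 for DP2.0
+ * (128b/132b).
+ */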
+static int intel_get_c20_custom_width(u32 clock, bool dp)
+{
+ if (dp && is_dp2(clock))
+ return 2;
+ else if (is_hdmi_frl(clock))
+ return 1;
+ else
+ return 0;
+}
+
+static void intel_c20_pll_program(struct drm_i915_private *i915,
+ const struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ const struct intel_c20pll_state *pll_state = &crtc_state->cx0pll_state.c20;
+ bool dp = false;
+ int lane = crtc_state->lane_count > 2 ? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0;
+ bool cntx;
+ int i;
+
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ dp = true;
+
+ /* 1. Read current context selection */
+ cntx = intel_cx0_read(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & PHY_C20_CONTEXT_TOGGLE;
+
+ /*
+ * 2. If there is a protocol switch from HDMI to DP or vice versa, clear
+ * the lane #0 MPLLB CAL_DONE_BANK registers. The DP2.0 10G and 20G
+ * rates enable MPLLA; the protocol switch is only applicable to MPLLA.
+ */
+ if (intel_c20_protocol_switch_valid(encoder)) {
+ for (i = 0; i < 4; i++)
+ intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(i), 0);
+ usleep_range(4000, 4100);
+ }
+
+ /* 3. Write SRAM configuration context. If A in use, write configuration to B context */
+ /* 3.1 Tx configuration */
+ for (i = 0; i < ARRAY_SIZE(pll_state->tx); i++) {
+ if (cntx)
+ intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_TX_CNTX_CFG(i), pll_state->tx[i]);
+ else
+ intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_TX_CNTX_CFG(i), pll_state->tx[i]);
+ }
+
+ /* 3.2 common configuration */
+ for (i = 0; i < ARRAY_SIZE(pll_state->cmn); i++) {
+ if (cntx)
+ intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_A_CMN_CNTX_CFG(i), pll_state->cmn[i]);
+ else
+ intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0, PHY_C20_B_CMN_CNTX_CFG(i), pll_state->cmn[i]);
+ }
+
+ /* 3.3 mpllb or mplla configuration */
+ if (intel_c20_use_mplla(pll_state->clock)) {
+ for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
+ if (cntx)
+ intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
+ PHY_C20_A_MPLLA_CNTX_CFG(i),
+ pll_state->mplla[i]);
+ else
+ intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
+ PHY_C20_B_MPLLA_CNTX_CFG(i),
+ pll_state->mplla[i]);
+ }
+ } else {
+ for (i = 0; i < ARRAY_SIZE(pll_state->mpllb); i++) {
+ if (cntx)
+ intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
+ PHY_C20_A_MPLLB_CNTX_CFG(i),
+ pll_state->mpllb[i]);
+ else
+ intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
+ PHY_C20_B_MPLLB_CNTX_CFG(i),
+ pll_state->mpllb[i]);
+ }
+ }
+
+ /* 4. Program custom width to match the link protocol */
+ intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_WIDTH,
+ PHY_C20_CUSTOM_WIDTH_MASK,
+ PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(pll_state->clock, dp)),
+ MB_WRITE_COMMITTED);
+
+ /* 5. For DP or 6. For HDMI */
+ if (dp) {
+ intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
+ BIT(6) | PHY_C20_CUSTOM_SERDES_MASK,
+ BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(pll_state->clock)),
+ MB_WRITE_COMMITTED);
+ } else {
+ intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
+ BIT(7) | PHY_C20_CUSTOM_SERDES_MASK,
+ is_hdmi_frl(pll_state->clock) ? BIT(7) : 0,
+ MB_WRITE_COMMITTED);
+
+ intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
+ intel_c20_get_hdmi_rate(pll_state->clock),
+ MB_WRITE_COMMITTED);
+ }
+
+ /*
+ * 7. Write Vendor specific registers to toggle context setting to load
+ * the updated programming toggle context bit
+ */
+ intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
+ BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED);
+}
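The A/B context handling above is a double-buffering scheme: steps 3.1-3.3 fully program whichever SRAM context the PHY is not currently reading, and the one-bit toggle in step 7 switches over atomically, so the PLL never sees a half-written configuration. A self-contained model of the idea (illustrative only; the real banks are the PHY_C20_{A,B}_*_CNTX_CFG SRAM ranges):

struct c20_ctx_model {
	u16 bank[2][4];	/* two contexts' worth of registers */
	bool live;	/* bank the PHY currently reads, cf. cntx */
};

static void c20_model_update(struct c20_ctx_model *m, const u16 *cfg)
{
	int i;

	/* Program the inactive bank while the live one keeps running. */
	for (i = 0; i < 4; i++)
		m->bank[!m->live][i] = cfg[i];

	/* A single bit flip makes the new configuration live (step 7). */
	m->live = !m->live;
}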
+
+int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_c10pll_state *pll_state)
+{
+ unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
+ unsigned int multiplier, tx_clk_div, hdmi_div, refclk = 38400;
+ int tmpclk = 0;
+
+ if (pll_state->pll[0] & C10_PLL0_FRACEN) {
+ frac_quot = pll_state->pll[12] << 8 | pll_state->pll[11];
+ frac_rem = pll_state->pll[14] << 8 | pll_state->pll[13];
+ frac_den = pll_state->pll[10] << 8 | pll_state->pll[9];
+ }
+
+ multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, pll_state->pll[3]) << 8 |
+ pll_state->pll[2]) / 2 + 16;
+
+ tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, pll_state->pll[15]);
+ hdmi_div = REG_FIELD_GET8(C10_PLL15_HDMIDIV_MASK, pll_state->pll[15]);
+
+ tmpclk = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, (multiplier << 16) + frac_quot) +
+ DIV_ROUND_CLOSEST(refclk * frac_rem, frac_den),
+ 10 << (tx_clk_div + 16));
+ tmpclk *= (hdmi_div ? 2 : 1);
+
+ return tmpclk;
+}
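Unwinding the fixed-point arithmetic, this computes, in kHz:

  port_clock = refclk * (multiplier + (frac_quot + frac_rem / frac_den) / 2^16)
               / (10 * 2^tx_clk_div) * (hdmi_div ? 2 : 1)

A floating-point mirror for offline sanity checking (illustrative; kernel code must of course stay integer-only, hence the DIV_ROUND_CLOSEST gymnastics above):

static double c10_port_clock(double refclk, unsigned int multiplier,
			     unsigned int frac_quot, unsigned int frac_rem,
			     unsigned int frac_den, unsigned int tx_clk_div,
			     bool hdmi_div)
{
	double frac = (frac_quot + (double)frac_rem / frac_den) / 65536.0;

	return refclk * (multiplier + frac) / (10 << tx_clk_div) *
	       (hdmi_div ? 2.0 : 1.0);
}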
+
+int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_c20pll_state *pll_state)
+{
+ unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
+ unsigned int multiplier, refclk = 38400;
+ unsigned int tx_clk_div;
+ unsigned int ref_clk_mpllb_div;
+ unsigned int fb_clk_div4_en;
+ unsigned int ref, vco;
+ unsigned int tx_rate_mult;
+ unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]);
+
+ if (pll_state->tx[0] & C20_PHY_USE_MPLLB) {
+ tx_rate_mult = 1;
+ frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]);
+ frac_quot = pll_state->mpllb[8];
+ frac_rem = pll_state->mpllb[9];
+ frac_den = pll_state->mpllb[7];
+ multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]);
+ tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]);
+ ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]);
+ fb_clk_div4_en = 0;
+ } else {
+ tx_rate_mult = 2;
+ frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]);
+ frac_quot = pll_state->mplla[8];
+ frac_rem = pll_state->mplla[9];
+ frac_den = pll_state->mplla[7];
+ multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]);
+ tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]);
+ ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]);
+ fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]);
+ }
+
+ if (frac_en)
+ frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den);
+ else
+ frac = 0;
+
+ ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div);
+ vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10);
+
+ return vco << tx_rate_mult >> tx_clk_div >> tx_rate;
+}
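The same unwinding for C20, as written:

  ref = refclk * 2^(1 + fb_clk_div4_en) / 2^ref_clk_mpllb_div
  frac = frac_quot + round(frac_rem / frac_den)             (when frac_en)
  vco = ref * (multiplier / 4 + frac / 2^17) / 10
  port_clock = vco * 2^tx_rate_mult / (2^tx_clk_div * 2^tx_rate)

The multiplier field is in quarter-ratio units, hence the multiplier << (17 - 2). Note that the final line shifts by tx_rate_mult (x2 for MPLLB, x4 for MPLLA) rather than multiplying by it; that is what the code does as written, and is worth double-checking against bspec.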
+
+static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool lane_reversal)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u32 val = 0;
+
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port), XELPDP_PORT_REVERSAL,
+ lane_reversal ? XELPDP_PORT_REVERSAL : 0);
+
+ if (lane_reversal)
+ val |= XELPDP_LANE1_PHY_CLOCK_SELECT;
+
+ val |= XELPDP_FORWARD_CLOCK_UNGATE;
+
+ if (is_hdmi_frl(crtc_state->port_clock))
+ val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
+ else
+ val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
+
+ /* TODO: HDMI FRL */
+ /* DP2.0 10G and 20G rates enable MPLLA */
+ if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000)
+ val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0;
+ else
+ val |= crtc_state->cx0pll_state.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
+
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
+ XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_SSC_ENABLE_PLLB, val);
+}
+
+static u32 intel_cx0_get_powerdown_update(u8 lane_mask)
+{
+ u32 val = 0;
+ int lane = 0;
+
+ for_each_cx0_lane_in_mask(lane_mask, lane)
+ val |= XELPDP_LANE_POWERDOWN_UPDATE(lane);
+
+ return val;
+}
+
+static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state)
+{
+ u32 val = 0;
+ int lane = 0;
+
+ for_each_cx0_lane_in_mask(lane_mask, lane)
+ val |= XELPDP_LANE_POWERDOWN_NEW_STATE(lane, state);
+
+ return val;
+}
+
+static void intel_cx0_powerdown_change_sequence(struct drm_i915_private *i915,
+ enum port port,
+ u8 lane_mask, u8 state)
+{
+ enum phy phy = intel_port_to_phy(i915, port);
+ int lane;
+
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+ intel_cx0_get_powerdown_state(INTEL_CX0_BOTH_LANES, XELPDP_LANE_POWERDOWN_NEW_STATE_MASK),
+ intel_cx0_get_powerdown_state(lane_mask, state));
+
+ /* Wait for pending transactions. */
+ for_each_cx0_lane_in_mask(lane_mask, lane)
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_M2P_MSGBUS_CTL(port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ drm_dbg_kms(&i915->drm,
+ "PHY %c Timeout waiting for previous transaction to complete. Reset the bus.\n",
+ phy_name(phy));
+ intel_cx0_bus_reset(i915, port, lane);
+ }
+
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+ intel_cx0_get_powerdown_update(INTEL_CX0_BOTH_LANES),
+ intel_cx0_get_powerdown_update(lane_mask));
+
+ /* Poll for the powerdown update to be acknowledged. */
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
+ intel_cx0_get_powerdown_update(lane_mask), 0,
+ XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
+ drm_warn(&i915->drm, "PHY %c failed to update powerdown state after %dus.\n",
+ phy_name(phy), XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US);
+}
+
+static void intel_cx0_setup_powerdown(struct drm_i915_private *i915, enum port port)
+{
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+ XELPDP_POWER_STATE_READY_MASK,
+ XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL3(port),
+ XELPDP_POWER_STATE_ACTIVE_MASK |
+ XELPDP_PLL_LANE_STAGGERING_DELAY_MASK,
+ XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) |
+ XELPDP_PLL_LANE_STAGGERING_DELAY(0));
+}
+
+static u32 intel_cx0_get_pclk_refclk_request(u8 lane_mask)
+{
+ u32 val = 0;
+ int lane = 0;
+
+ for_each_cx0_lane_in_mask(lane_mask, lane)
+ val |= XELPDP_LANE_PCLK_REFCLK_REQUEST(lane);
+
+ return val;
+}
+
+static u32 intel_cx0_get_pclk_refclk_ack(u8 lane_mask)
+{
+ u32 val = 0;
+ int lane = 0;
+
+ for_each_cx0_lane_in_mask(lane_mask, lane)
+ val |= XELPDP_LANE_PCLK_REFCLK_ACK(lane);
+
+ return val;
+}
+
+/* FIXME: Some Type-C cases need not reset both the lanes. Handle those cases. */
+static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915, enum port port,
+ bool lane_reversal)
+{
+ enum phy phy = intel_port_to_phy(i915, port);
+ u8 lane_mask = lane_reversal ? INTEL_CX0_LANE1 :
+ INTEL_CX0_LANE0;
+
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL1(port),
+ XELPDP_PORT_BUF_SOC_PHY_READY,
+ XELPDP_PORT_BUF_SOC_PHY_READY,
+ XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
+ drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
+ phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
+
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+ XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1),
+ XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1));
+
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
+ XELPDP_LANE_PHY_CURRENT_STATUS(0) |
+ XELPDP_LANE_PHY_CURRENT_STATUS(1),
+ XELPDP_LANE_PHY_CURRENT_STATUS(0) |
+ XELPDP_LANE_PHY_CURRENT_STATUS(1),
+ XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
+ drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dus.\n",
+ phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
+
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(port),
+ intel_cx0_get_pclk_refclk_request(INTEL_CX0_BOTH_LANES),
+ intel_cx0_get_pclk_refclk_request(lane_mask));
+
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(port),
+ intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES),
+ intel_cx0_get_pclk_refclk_ack(lane_mask),
+ XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
+ drm_warn(&i915->drm, "PHY %c failed to request refclk after %dus.\n",
+ phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US);
+
+ intel_cx0_powerdown_change_sequence(i915, port, INTEL_CX0_BOTH_LANES,
+ CX0_P2_STATE_RESET);
+ intel_cx0_setup_powerdown(i915, port);
+
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
+ XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1),
+ 0);
+
+ if (intel_de_wait_for_clear(i915, XELPDP_PORT_BUF_CTL2(port),
+ XELPDP_LANE_PHY_CURRENT_STATUS(0) |
+ XELPDP_LANE_PHY_CURRENT_STATUS(1),
+ XELPDP_PORT_RESET_END_TIMEOUT))
+ drm_warn(&i915->drm, "PHY %c failed to bring out of Lane reset after %dms.\n",
+ phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT);
+}
+
+static void intel_cx0_program_phy_lane(struct drm_i915_private *i915,
+ struct intel_encoder *encoder, int lane_count,
+ bool lane_reversal)
+{
+ u8 l0t1, l0t2, l1t1, l1t2;
+ bool dp_alt_mode = intel_tc_port_in_dp_alt_mode(enc_to_dig_port(encoder));
+ enum port port = encoder->port;
+
+ if (intel_is_c10phy(i915, intel_port_to_phy(i915, port)))
+ intel_cx0_rmw(i915, port, INTEL_CX0_BOTH_LANES,
+ PHY_C10_VDR_CONTROL(1), 0,
+ C10_VDR_CTRL_MSGBUS_ACCESS,
+ MB_WRITE_COMMITTED);
+
+ /* TODO: DP-alt MFD case where only one PHY lane should be programmed. */
+ l0t1 = intel_cx0_read(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(1, 2));
+ l0t2 = intel_cx0_read(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(2, 2));
+ l1t1 = intel_cx0_read(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(1, 2));
+ l1t2 = intel_cx0_read(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(2, 2));
+
+ l0t1 |= CONTROL2_DISABLE_SINGLE_TX;
+ l0t2 |= CONTROL2_DISABLE_SINGLE_TX;
+ l1t1 |= CONTROL2_DISABLE_SINGLE_TX;
+ l1t2 |= CONTROL2_DISABLE_SINGLE_TX;
+
+ if (lane_reversal) {
+ switch (lane_count) {
+ case 4:
+ l0t1 &= ~CONTROL2_DISABLE_SINGLE_TX;
+ fallthrough;
+ case 3:
+ l0t2 &= ~CONTROL2_DISABLE_SINGLE_TX;
+ fallthrough;
+ case 2:
+ l1t1 &= ~CONTROL2_DISABLE_SINGLE_TX;
+ fallthrough;
+ case 1:
+ l1t2 &= ~CONTROL2_DISABLE_SINGLE_TX;
+ break;
+ default:
+ MISSING_CASE(lane_count);
+ }
+ } else {
+ switch (lane_count) {
+ case 4:
+ l1t2 &= ~CONTROL2_DISABLE_SINGLE_TX;
+ fallthrough;
+ case 3:
+ l1t1 &= ~CONTROL2_DISABLE_SINGLE_TX;
+ fallthrough;
+ case 2:
+ l0t2 &= ~CONTROL2_DISABLE_SINGLE_TX;
+ l0t1 &= ~CONTROL2_DISABLE_SINGLE_TX;
+ break;
+ case 1:
+ if (dp_alt_mode)
+ l0t2 &= ~CONTROL2_DISABLE_SINGLE_TX;
+ else
+ l0t1 &= ~CONTROL2_DISABLE_SINGLE_TX;
+ break;
+ default:
+ MISSING_CASE(lane_count);
+ }
+ }
+
+ /* Write back the computed transmitter (ML) enable/disable state. */
+ intel_cx0_write(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(1, 2),
+ l0t1, MB_WRITE_COMMITTED);
+ intel_cx0_write(i915, port, INTEL_CX0_LANE0, PHY_CX0_TX_CONTROL(2, 2),
+ l0t2, MB_WRITE_COMMITTED);
+ intel_cx0_write(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(1, 2),
+ l1t1, MB_WRITE_COMMITTED);
+ intel_cx0_write(i915, port, INTEL_CX0_LANE1, PHY_CX0_TX_CONTROL(2, 2),
+ l1t2, MB_WRITE_COMMITTED);
+
+ if (intel_is_c10phy(i915, intel_port_to_phy(i915, port)))
+ intel_cx0_rmw(i915, port, INTEL_CX0_BOTH_LANES,
+ PHY_C10_VDR_CONTROL(1), 0,
+ C10_VDR_CTRL_UPDATE_CFG,
+ MB_WRITE_COMMITTED);
+}
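For readers tracing the two switch ladders: with two PHY lanes of two transmitters each, the fallthroughs enable a contiguous run of TXes starting from the logical lane-0 end, which flips under lane reversal. The resulting enabled transmitters are:

  lane_count   normal                    reversed
  1            l0t1 (l0t2 in DP-alt)     l1t2
  2            l0t1, l0t2                l1t1, l1t2
  3            l0t1, l0t2, l1t1          l0t2, l1t1, l1t2
  4            all four                  all four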
+
+static u32 intel_cx0_get_pclk_pll_request(u8 lane_mask)
+{
+ u32 val = 0;
+ int lane = 0;
+
+ for_each_cx0_lane_in_mask(lane_mask, lane)
+ val |= XELPDP_LANE_PCLK_PLL_REQUEST(lane);
+
+ return val;
+}
+
+static u32 intel_cx0_get_pclk_pll_ack(u8 lane_mask)
+{
+ u32 val = 0;
+ int lane = 0;
+
+ for_each_cx0_lane_in_mask(lane_mask, lane)
+ val |= XELPDP_LANE_PCLK_PLL_ACK(lane);
+
+ return val;
+}
+
+static void intel_cx0pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+ u8 maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 :
+ INTEL_CX0_LANE0;
+ intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder);
+
+ /*
+ * 1. Program PORT_CLOCK_CTL REGISTER to configure
+ * clock muxes, gating and SSC
+ */
+ intel_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
+
+ /* 2. Bring PHY out of reset. */
+ intel_cx0_phy_lane_reset(i915, encoder->port, lane_reversal);
+
+ /*
+ * 3. Change Phy power state to Ready.
+ * TODO: For DP alt mode use only one lane.
+ */
+ intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
+ CX0_P2_STATE_READY);
+
+ /* 4. Program PHY internal PLL internal registers. */
+ if (intel_is_c10phy(i915, phy))
+ intel_c10_pll_program(i915, crtc_state, encoder);
+ else
+ intel_c20_pll_program(i915, crtc_state, encoder);
+
+ /*
+ * 5. Program the enabled and disabled owned PHY lane
+ * transmitters over message bus
+ */
+ intel_cx0_program_phy_lane(i915, encoder, crtc_state->lane_count, lane_reversal);
+
+ /*
+ * 6. Follow the Display Voltage Frequency Switching - Sequence
+ * Before Frequency Change. We handle this step in bxt_set_cdclk().
+ */
+
+ /*
+ * 7. Program DDI_CLK_VALFREQ to match intended DDI
+ * clock frequency.
+ */
+ intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port),
+ crtc_state->port_clock);
+
+ /*
+ * 8. Set PORT_CLOCK_CTL register PCLK PLL Request
+ * LN<Lane for maxPCLK> to "1" to enable PLL.
+ */
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES),
+ intel_cx0_get_pclk_pll_request(maxpclk_lane));
+
+ /* 9. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
+ intel_cx0_get_pclk_pll_ack(maxpclk_lane),
+ XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
+ drm_warn(&i915->drm, "Port %c PLL not locked after %dus.\n",
+ phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US);
+
+ /*
+ * 10. Follow the Display Voltage Frequency Switching Sequence After
+ * Frequency Change. We handle this step in bxt_set_cdclk().
+ */
+
+ /* TODO: enable TBT-ALT mode */
+ intel_cx0_phy_transaction_end(encoder, wakeref);
+}
+
+int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u32 clock;
+ u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
+
+ clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+
+ drm_WARN_ON(&i915->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE));
+ drm_WARN_ON(&i915->drm, !(val & XELPDP_TBT_CLOCK_REQUEST));
+ drm_WARN_ON(&i915->drm, !(val & XELPDP_TBT_CLOCK_ACK));
+
+ switch (clock) {
+ case XELPDP_DDI_CLOCK_SELECT_TBT_162:
+ return 162000;
+ case XELPDP_DDI_CLOCK_SELECT_TBT_270:
+ return 270000;
+ case XELPDP_DDI_CLOCK_SELECT_TBT_540:
+ return 540000;
+ case XELPDP_DDI_CLOCK_SELECT_TBT_810:
+ return 810000;
+ default:
+ MISSING_CASE(clock);
+ return 162000;
+ }
+}
+
+static int intel_mtl_tbt_clock_select(struct drm_i915_private *i915, int clock)
+{
+ switch (clock) {
+ case 162000:
+ return XELPDP_DDI_CLOCK_SELECT_TBT_162;
+ case 270000:
+ return XELPDP_DDI_CLOCK_SELECT_TBT_270;
+ case 540000:
+ return XELPDP_DDI_CLOCK_SELECT_TBT_540;
+ case 810000:
+ return XELPDP_DDI_CLOCK_SELECT_TBT_810;
+ default:
+ MISSING_CASE(clock);
+ return XELPDP_DDI_CLOCK_SELECT_TBT_162;
+ }
+}
+
+static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ u32 val = 0;
+
+ /*
+ * 1. Program PORT_CLOCK_CTL REGISTER to configure
+ * clock muxes, gating and SSC
+ */
+ val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(i915, crtc_state->port_clock));
+ val |= XELPDP_FORWARD_CLOCK_UNGATE;
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, val);
+
+ /* 2. Read back PORT_CLOCK_CTL REGISTER */
+ val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
+
+ /*
+ * 3. Follow the Display Voltage Frequency Switching - Sequence
+ * Before Frequency Change. We handle this step in bxt_set_cdclk().
+ */
+
+ /*
+ * 4. Set PORT_CLOCK_CTL register TBT CLOCK Request to "1" to enable PLL.
+ */
+ val |= XELPDP_TBT_CLOCK_REQUEST;
+ intel_de_write(i915, XELPDP_PORT_CLOCK_CTL(encoder->port), val);
+
+ /* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ XELPDP_TBT_CLOCK_ACK,
+ XELPDP_TBT_CLOCK_ACK,
+ 100, 0, NULL))
+ drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n",
+ encoder->base.base.id, encoder->base.name, phy_name(phy));
+
+ /*
+ * 6. Follow the Display Voltage Frequency Switching Sequence After
+ * Frequency Change. We handle this step in bxt_set_cdclk().
+ */
+
+ /*
+ * 7. Program DDI_CLK_VALFREQ to match intended DDI
+ * clock frequency.
+ */
+ intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port),
+ crtc_state->port_clock);
+}
+
+void intel_mtl_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ intel_mtl_tbt_pll_enable(encoder, crtc_state);
+ else
+ intel_cx0pll_enable(encoder, crtc_state);
+}
+
+static void intel_cx0pll_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ bool is_c10 = intel_is_c10phy(i915, phy);
+ intel_wakeref_t wakeref = intel_cx0_phy_transaction_begin(encoder);
+
+ /* 1. Change owned PHY lane power to Disable state. */
+ intel_cx0_powerdown_change_sequence(i915, encoder->port, INTEL_CX0_BOTH_LANES,
+ is_c10 ? CX0_P2PG_STATE_DISABLE :
+ CX0_P4PG_STATE_DISABLE);
+
+ /*
+ * 2. Follow the Display Voltage Frequency Switching Sequence Before
+ * Frequency Change. We handle this step in bxt_set_cdclk().
+ */
+
+ /*
+ * 3. Set PORT_CLOCK_CTL register PCLK PLL Request LN<Lane for maxPCLK>
+ * to "0" to disable PLL.
+ */
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_cx0_get_pclk_pll_request(INTEL_CX0_BOTH_LANES) |
+ intel_cx0_get_pclk_refclk_request(INTEL_CX0_BOTH_LANES), 0);
+
+ /* 4. Program DDI_CLK_VALFREQ to 0. */
+ intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0);
+
+ /*
+ * 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "0".
+ */
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
+ intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
+ XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
+ drm_warn(&i915->drm, "Port %c PLL not unlocked after %dus.\n",
+ phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US);
+
+ /*
+ * 6. Follow the Display Voltage Frequency Switching Sequence After
+ * Frequency Change. We handle this step in bxt_set_cdclk().
+ */
+
+ /* 7. Program PORT_CLOCK_CTL register to disable and gate clocks. */
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ XELPDP_DDI_CLOCK_SELECT_MASK, 0);
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ XELPDP_FORWARD_CLOCK_UNGATE, 0);
+
+ intel_cx0_phy_transaction_end(encoder, wakeref);
+}
+
+static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ /*
+ * 1. Follow the Display Voltage Frequency Switching Sequence Before
+ * Frequency Change. We handle this step in bxt_set_cdclk().
+ */
+
+ /*
+ * 2. Set PORT_CLOCK_CTL register TBT CLOCK Request to "0" to disable PLL.
+ */
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ XELPDP_TBT_CLOCK_REQUEST, 0);
+
+ /* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */
+ if (__intel_de_wait_for_register(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ XELPDP_TBT_CLOCK_ACK,
+ 0,
+ 10, 0, NULL))
+ drm_warn(&i915->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n",
+ encoder->base.base.id, encoder->base.name, phy_name(phy));
+
+ /*
+ * 4. Follow the Display Voltage Frequency Switching Sequence After
+ * Frequency Change. We handle this step in bxt_set_cdclk().
+ */
+
+ /*
+ * 5. Program PORT_CLOCK_CTL register to disable and gate clocks.
+ */
+ intel_de_rmw(i915, XELPDP_PORT_CLOCK_CTL(encoder->port),
+ XELPDP_DDI_CLOCK_SELECT_MASK |
+ XELPDP_FORWARD_CLOCK_UNGATE, 0);
+
+ /* 6. Program DDI_CLK_VALFREQ to 0. */
+ intel_de_write(i915, DDI_CLK_VALFREQ(encoder->port), 0);
+}
+
+void intel_mtl_pll_disable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ intel_mtl_tbt_pll_disable(encoder);
+ else
+ intel_cx0pll_disable(encoder);
+}
+
+enum icl_port_dpll_id
+intel_mtl_port_pll_type(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ /*
+ * TODO: Determine the PLL type from the SW state, once MTL PLL
+ * handling is done via the standard shared DPLL framework.
+ */
+ u32 val = intel_de_read(i915, XELPDP_PORT_CLOCK_CTL(encoder->port));
+ u32 clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val);
+
+ if (clock == XELPDP_DDI_CLOCK_SELECT_MAXPCLK ||
+ clock == XELPDP_DDI_CLOCK_SELECT_DIV18CLK)
+ return ICL_PORT_DPLL_MG_PHY;
+ else
+ return ICL_PORT_DPLL_DEFAULT;
+}
+
+void intel_c10pll_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_c10pll_state mpllb_hw_state = { 0 };
+ struct intel_c10pll_state *mpllb_sw_state = &new_crtc_state->cx0pll_state.c10;
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_encoder *encoder;
+ enum phy phy;
+ int i;
+
+ if (DISPLAY_VER(i915) < 14)
+ return;
+
+ if (!new_crtc_state->hw.active)
+ return;
+
+ /* intel_get_crtc_new_encoder() only works for modeset/fastset commits */
+ if (!intel_crtc_needs_modeset(new_crtc_state) &&
+ !intel_crtc_needs_fastset(new_crtc_state))
+ return;
+
+ encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
+ phy = intel_port_to_phy(i915, encoder->port);
+
+ if (!intel_is_c10phy(i915, phy))
+ return;
+
+ intel_c10pll_readout_hw_state(encoder, &mpllb_hw_state);
+
+ for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
+ u8 expected = mpllb_sw_state->pll[i];
+
+ I915_STATE_WARN(mpllb_hw_state.pll[i] != expected,
+ "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
+ crtc->base.base.id, crtc->base.name,
+ i, expected, mpllb_hw_state.pll[i]);
+ }
+
+ I915_STATE_WARN(mpllb_hw_state.tx != mpllb_sw_state->tx,
+ "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)",
+ crtc->base.base.id, crtc->base.name,
+ mpllb_sw_state->tx, mpllb_hw_state.tx);
+
+ I915_STATE_WARN(mpllb_hw_state.cmn != mpllb_sw_state->cmn,
+ "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)",
+ crtc->base.base.id, crtc->base.name,
+ mpllb_sw_state->cmn, mpllb_hw_state.cmn);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
new file mode 100644
index 000000000000..f99809af257d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_CX0_PHY_H__
+#define __INTEL_CX0_PHY_H__
+
+#include <linux/types.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+
+struct drm_i915_private;
+struct intel_encoder;
+struct intel_crtc_state;
+enum icl_port_dpll_id;
+enum phy;
+
+bool intel_is_c10phy(struct drm_i915_private *dev_priv, enum phy phy);
+void intel_mtl_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_mtl_pll_disable(struct intel_encoder *encoder);
+enum icl_port_dpll_id
+intel_mtl_port_pll_type(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c10pll_state *pll_state);
+int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder);
+void intel_c10pll_dump_hw_state(struct drm_i915_private *dev_priv,
+ const struct intel_c10pll_state *hw_state);
+int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_c10pll_state *pll_state);
+void intel_c10pll_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc_state *new_crtc_state);
+void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_c20pll_state *pll_state);
+void intel_c20pll_dump_hw_state(struct drm_i915_private *i915,
+ const struct intel_c20pll_state *hw_state);
+int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_c20pll_state *pll_state);
+void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock);
+void intel_cx0_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ u32 level);
+int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder);
+#endif /* __INTEL_CX0_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
new file mode 100644
index 000000000000..ab9d1d983b88
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_CX0_PHY_REGS_H__
+#define __INTEL_CX0_PHY_REGS_H__
+
+#include "i915_reg_defs.h"
+
+#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A 0x64040
+#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B 0x64140
+#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1 0x16F240
+#define _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2 0x16F440
+#define XELPDP_PORT_M2P_MSGBUS_CTL(port, lane) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2) + (lane) * 4)
+#define XELPDP_PORT_M2P_TRANSACTION_PENDING REG_BIT(31)
+#define XELPDP_PORT_M2P_COMMAND_TYPE_MASK REG_GENMASK(30, 27)
+#define XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x1)
+#define XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x2)
+#define XELPDP_PORT_M2P_COMMAND_READ REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x3)
+#define XELPDP_PORT_M2P_DATA_MASK REG_GENMASK(23, 16)
+#define XELPDP_PORT_M2P_DATA(val) REG_FIELD_PREP(XELPDP_PORT_M2P_DATA_MASK, val)
+#define XELPDP_PORT_M2P_TRANSACTION_RESET REG_BIT(15)
+#define XELPDP_PORT_M2P_ADDRESS_MASK REG_GENMASK(11, 0)
+#define XELPDP_PORT_M2P_ADDRESS(val) REG_FIELD_PREP(XELPDP_PORT_M2P_ADDRESS_MASK, val)
+#define XELPDP_PORT_P2M_MSGBUS_STATUS(port, lane) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2) + (lane) * 4 + 8)
+#define XELPDP_PORT_P2M_RESPONSE_READY REG_BIT(31)
+#define XELPDP_PORT_P2M_COMMAND_TYPE_MASK REG_GENMASK(30, 27)
+#define XELPDP_PORT_P2M_COMMAND_READ_ACK 0x4
+#define XELPDP_PORT_P2M_COMMAND_WRITE_ACK 0x5
+#define XELPDP_PORT_P2M_DATA_MASK REG_GENMASK(23, 16)
+#define XELPDP_PORT_P2M_DATA(val) REG_FIELD_PREP(XELPDP_PORT_P2M_DATA_MASK, val)
+#define XELPDP_PORT_P2M_ERROR_SET REG_BIT(15)
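The M2P/P2M pairing is encoded purely in address arithmetic: each lane gets a 4-byte M2P control slot at base + lane * 4, and the matching P2M status register sits 8 bytes above it. Worked out for port A (base 0x64040):

  lane 0: M2P = 0x64040, P2M = 0x64048
  lane 1: M2P = 0x64044, P2M = 0x6404C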
+
+#define XELPDP_MSGBUS_TIMEOUT_SLOW 1
+#define XELPDP_MSGBUS_TIMEOUT_FAST_US 2
+#define XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US 3200
+#define XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US 20
+#define XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US 100
+#define XELPDP_PORT_RESET_START_TIMEOUT_US 5
+#define XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US 100
+#define XELPDP_PORT_RESET_END_TIMEOUT 15
+#define XELPDP_REFCLK_ENABLE_TIMEOUT_US 1
+
+#define _XELPDP_PORT_BUF_CTL1_LN0_A 0x64004
+#define _XELPDP_PORT_BUF_CTL1_LN0_B 0x64104
+#define _XELPDP_PORT_BUF_CTL1_LN0_USBC1 0x16F200
+#define _XELPDP_PORT_BUF_CTL1_LN0_USBC2 0x16F400
+#define XELPDP_PORT_BUF_CTL1(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+ _XELPDP_PORT_BUF_CTL1_LN0_A, \
+ _XELPDP_PORT_BUF_CTL1_LN0_B, \
+ _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
+ _XELPDP_PORT_BUF_CTL1_LN0_USBC2))
+#define XELPDP_PORT_BUF_D2D_LINK_ENABLE REG_BIT(29)
+#define XELPDP_PORT_BUF_D2D_LINK_STATE REG_BIT(28)
+#define XELPDP_PORT_BUF_SOC_PHY_READY REG_BIT(24)
+#define XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK REG_GENMASK(19, 18)
+#define XELPDP_PORT_BUF_PORT_DATA_10BIT REG_FIELD_PREP(XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK, 0)
+#define XELPDP_PORT_BUF_PORT_DATA_20BIT REG_FIELD_PREP(XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK, 1)
+#define XELPDP_PORT_BUF_PORT_DATA_40BIT REG_FIELD_PREP(XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK, 2)
+#define XELPDP_PORT_REVERSAL REG_BIT(16)
+#define XELPDP_PORT_BUF_IO_SELECT_TBT REG_BIT(11)
+#define XELPDP_PORT_BUF_PHY_IDLE REG_BIT(7)
+#define XELPDP_TC_PHY_OWNERSHIP REG_BIT(6)
+#define XELPDP_TCSS_POWER_REQUEST REG_BIT(5)
+#define XELPDP_TCSS_POWER_STATE REG_BIT(4)
+#define XELPDP_PORT_WIDTH_MASK REG_GENMASK(3, 1)
+#define XELPDP_PORT_WIDTH(val) REG_FIELD_PREP(XELPDP_PORT_WIDTH_MASK, val)
+
+#define XELPDP_PORT_BUF_CTL2(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+ _XELPDP_PORT_BUF_CTL1_LN0_A, \
+ _XELPDP_PORT_BUF_CTL1_LN0_B, \
+ _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
+ _XELPDP_PORT_BUF_CTL1_LN0_USBC2) + 4)
+
+#define XELPDP_LANE_PIPE_RESET(lane) _PICK(lane, REG_BIT(31), REG_BIT(30))
+#define XELPDP_LANE_PHY_CURRENT_STATUS(lane) _PICK(lane, REG_BIT(29), REG_BIT(28))
+#define XELPDP_LANE_POWERDOWN_UPDATE(lane) _PICK(lane, REG_BIT(25), REG_BIT(24))
+#define _XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK REG_GENMASK(23, 20)
+#define _XELPDP_LANE0_POWERDOWN_NEW_STATE(val) REG_FIELD_PREP(_XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK, val)
+#define _XELPDP_LANE1_POWERDOWN_NEW_STATE_MASK REG_GENMASK(19, 16)
+#define _XELPDP_LANE1_POWERDOWN_NEW_STATE(val) REG_FIELD_PREP(_XELPDP_LANE1_POWERDOWN_NEW_STATE_MASK, val)
+#define XELPDP_LANE_POWERDOWN_NEW_STATE(lane, val) _PICK(lane, \
+ _XELPDP_LANE0_POWERDOWN_NEW_STATE(val), \
+ _XELPDP_LANE1_POWERDOWN_NEW_STATE(val))
+#define XELPDP_LANE_POWERDOWN_NEW_STATE_MASK REG_GENMASK(3, 0)
+#define XELPDP_POWER_STATE_READY_MASK REG_GENMASK(7, 4)
+#define XELPDP_POWER_STATE_READY(val) REG_FIELD_PREP(XELPDP_POWER_STATE_READY_MASK, val)
+
+#define XELPDP_PORT_BUF_CTL3(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+ _XELPDP_PORT_BUF_CTL1_LN0_A, \
+ _XELPDP_PORT_BUF_CTL1_LN0_B, \
+ _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
+ _XELPDP_PORT_BUF_CTL1_LN0_USBC2) + 8)
+#define XELPDP_PLL_LANE_STAGGERING_DELAY_MASK REG_GENMASK(15, 8)
+#define XELPDP_PLL_LANE_STAGGERING_DELAY(val) REG_FIELD_PREP(XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, val)
+#define XELPDP_POWER_STATE_ACTIVE_MASK REG_GENMASK(3, 0)
+#define XELPDP_POWER_STATE_ACTIVE(val) REG_FIELD_PREP(XELPDP_POWER_STATE_ACTIVE_MASK, val)
+#define CX0_P0_STATE_ACTIVE 0x0
+#define CX0_P2_STATE_READY 0x2
+#define CX0_P2PG_STATE_DISABLE 0x9
+#define CX0_P4PG_STATE_DISABLE 0xC
+#define CX0_P2_STATE_RESET 0x2
+
+#define _XELPDP_PORT_CLOCK_CTL_A 0x640E0
+#define _XELPDP_PORT_CLOCK_CTL_B 0x641E0
+#define _XELPDP_PORT_CLOCK_CTL_USBC1 0x16F260
+#define _XELPDP_PORT_CLOCK_CTL_USBC2 0x16F460
+#define XELPDP_PORT_CLOCK_CTL(port) _MMIO(_PICK_EVEN_2RANGES(port, PORT_TC1, \
+ _XELPDP_PORT_CLOCK_CTL_A, \
+ _XELPDP_PORT_CLOCK_CTL_B, \
+ _XELPDP_PORT_CLOCK_CTL_USBC1, \
+ _XELPDP_PORT_CLOCK_CTL_USBC2))
+#define XELPDP_LANE_PCLK_PLL_REQUEST(lane) REG_BIT(31 - ((lane) * 4))
+#define XELPDP_LANE_PCLK_PLL_ACK(lane) REG_BIT(30 - ((lane) * 4))
+#define XELPDP_LANE_PCLK_REFCLK_REQUEST(lane) REG_BIT(29 - ((lane) * 4))
+#define XELPDP_LANE_PCLK_REFCLK_ACK(lane) REG_BIT(28 - ((lane) * 4))
+
+#define XELPDP_TBT_CLOCK_REQUEST REG_BIT(19)
+#define XELPDP_TBT_CLOCK_ACK REG_BIT(18)
+#define XELPDP_DDI_CLOCK_SELECT_MASK REG_GENMASK(15, 12)
+#define XELPDP_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XELPDP_DDI_CLOCK_SELECT_MASK, val)
+#define XELPDP_DDI_CLOCK_SELECT_NONE 0x0
+#define XELPDP_DDI_CLOCK_SELECT_MAXPCLK 0x8
+#define XELPDP_DDI_CLOCK_SELECT_DIV18CLK 0x9
+#define XELPDP_DDI_CLOCK_SELECT_TBT_162 0xc
+#define XELPDP_DDI_CLOCK_SELECT_TBT_270 0xd
+#define XELPDP_DDI_CLOCK_SELECT_TBT_540 0xe
+#define XELPDP_DDI_CLOCK_SELECT_TBT_810 0xf
+#define XELPDP_FORWARD_CLOCK_UNGATE REG_BIT(10)
+#define XELPDP_LANE1_PHY_CLOCK_SELECT REG_BIT(8)
+#define XELPDP_SSC_ENABLE_PLLA REG_BIT(1)
+#define XELPDP_SSC_ENABLE_PLLB REG_BIT(0)
+
+/* C10 Vendor Registers */
+#define PHY_C10_VDR_PLL(idx) (0xC00 + (idx))
+#define C10_PLL0_FRACEN REG_BIT8(4)
+#define C10_PLL3_MULTIPLIERH_MASK REG_GENMASK8(3, 0)
+#define C10_PLL15_TXCLKDIV_MASK REG_GENMASK8(2, 0)
+#define C10_PLL15_HDMIDIV_MASK REG_GENMASK8(5, 3)
+
+#define PHY_C10_VDR_CMN(idx) (0xC20 + (idx))
+#define C10_CMN0_REF_RANGE REG_FIELD_PREP(REG_GENMASK(4, 0), 1)
+#define C10_CMN0_REF_CLK_MPLLB_DIV REG_FIELD_PREP(REG_GENMASK(7, 5), 1)
+#define C10_CMN3_TXVBOOST_MASK REG_GENMASK8(7, 5)
+#define C10_CMN3_TXVBOOST(val) REG_FIELD_PREP8(C10_CMN3_TXVBOOST_MASK, val)
+#define PHY_C10_VDR_TX(idx) (0xC30 + (idx))
+#define C10_TX0_TX_MPLLB_SEL REG_BIT(4)
+#define C10_TX1_TERMCTL_MASK REG_GENMASK8(7, 5)
+#define C10_TX1_TERMCTL(val) REG_FIELD_PREP8(C10_TX1_TERMCTL_MASK, val)
+#define PHY_C10_VDR_CONTROL(idx) (0xC70 + (idx) - 1)
+#define C10_VDR_CTRL_MSGBUS_ACCESS REG_BIT8(2)
+#define C10_VDR_CTRL_MASTER_LANE REG_BIT8(1)
+#define C10_VDR_CTRL_UPDATE_CFG REG_BIT8(0)
+#define PHY_C10_VDR_CUSTOM_WIDTH 0xD02
+#define C10_VDR_CUSTOM_WIDTH_MASK REG_GENMASK(1, 0)
+#define C10_VDR_CUSTOM_WIDTH_8_10 REG_FIELD_PREP(C10_VDR_CUSTOM_WIDTH_MASK, 0)
+#define PHY_C10_VDR_OVRD 0xD71
+#define PHY_C10_VDR_OVRD_TX1 REG_BIT8(0)
+#define PHY_C10_VDR_OVRD_TX2 REG_BIT8(2)
+#define PHY_C10_VDR_PRE_OVRD_TX1 0xD80
+#define C10_PHY_OVRD_LEVEL_MASK REG_GENMASK8(5, 0)
+#define C10_PHY_OVRD_LEVEL(val) REG_FIELD_PREP8(C10_PHY_OVRD_LEVEL_MASK, val)
+#define PHY_CX0_VDROVRD_CTL(lane, tx, control) \
+ (PHY_C10_VDR_PRE_OVRD_TX1 + \
+ ((lane) ^ (tx)) * 0x10 + (control))
+
+/* PIPE SPEC Defined Registers */
+#define PHY_CX0_TX_CONTROL(tx, control) (0x400 + ((tx) - 1) * 0x200 + (control))
+#define CONTROL2_DISABLE_SINGLE_TX REG_BIT(6)
+
+/* C20 Registers */
+#define PHY_C20_WR_ADDRESS_L 0xC02
+#define PHY_C20_WR_ADDRESS_H 0xC03
+#define PHY_C20_WR_DATA_L 0xC04
+#define PHY_C20_WR_DATA_H 0xC05
+#define PHY_C20_RD_ADDRESS_L 0xC06
+#define PHY_C20_RD_ADDRESS_H 0xC07
+#define PHY_C20_RD_DATA_L 0xC08
+#define PHY_C20_RD_DATA_H 0xC09
+#define PHY_C20_VDR_CUSTOM_SERDES_RATE 0xD00
+#define PHY_C20_VDR_HDMI_RATE 0xD01
+#define PHY_C20_CONTEXT_TOGGLE REG_BIT8(0)
+#define PHY_C20_CUSTOM_SERDES_MASK REG_GENMASK8(4, 1)
+#define PHY_C20_CUSTOM_SERDES(val) REG_FIELD_PREP8(PHY_C20_CUSTOM_SERDES_MASK, val)
+#define PHY_C20_VDR_CUSTOM_WIDTH 0xD02
+#define PHY_C20_CUSTOM_WIDTH_MASK REG_GENMASK(1, 0)
+#define PHY_C20_CUSTOM_WIDTH(val) REG_FIELD_PREP8(PHY_C20_CUSTOM_WIDTH_MASK, val)
+#define PHY_C20_A_TX_CNTX_CFG(idx) (0xCF2E - (idx))
+#define PHY_C20_B_TX_CNTX_CFG(idx) (0xCF2A - (idx))
+#define C20_PHY_TX_RATE REG_GENMASK(2, 0)
+#define PHY_C20_A_CMN_CNTX_CFG(idx) (0xCDAA - (idx))
+#define PHY_C20_B_CMN_CNTX_CFG(idx) (0xCDA5 - (idx))
+#define PHY_C20_A_MPLLA_CNTX_CFG(idx) (0xCCF0 - (idx))
+#define PHY_C20_B_MPLLA_CNTX_CFG(idx) (0xCCE5 - (idx))
+#define C20_MPLLA_FRACEN REG_BIT(14)
+#define C20_FB_CLK_DIV4_EN REG_BIT(13)
+#define C20_MPLLA_TX_CLK_DIV_MASK REG_GENMASK(10, 8)
+#define PHY_C20_A_MPLLB_CNTX_CFG(idx) (0xCB5A - (idx))
+#define PHY_C20_B_MPLLB_CNTX_CFG(idx) (0xCB4E - (idx))
+#define C20_MPLLB_TX_CLK_DIV_MASK REG_GENMASK(15, 13)
+#define C20_MPLLB_FRACEN REG_BIT(13)
+#define C20_REF_CLK_MPLLB_DIV_MASK REG_GENMASK(12, 10)
+#define C20_MULTIPLIER_MASK REG_GENMASK(11, 0)
+#define C20_PHY_USE_MPLLB REG_BIT(7)
+
+/* C20 Phy VSwing Masks */
+#define C20_PHY_VSWING_PREEMPH_MASK REG_GENMASK8(5, 0)
+#define C20_PHY_VSWING_PREEMPH(val) REG_FIELD_PREP8(C20_PHY_VSWING_PREEMPH_MASK, val)
+
+#define RAWLANEAONX_DIG_TX_MPLLB_CAL_DONE_BANK(idx) (0x303D + (idx))
+
+#endif /* __INTEL_CX0_PHY_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 3a7b98837516..0ba5492c0604 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -39,6 +39,8 @@
#include "intel_combo_phy_regs.h"
#include "intel_connector.h"
#include "intel_crtc.h"
+#include "intel_cx0_phy.h"
+#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
@@ -68,7 +70,6 @@
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vdsc_regs.h"
-#include "intel_vrr.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -169,6 +170,18 @@ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
trans->entries[level].hsw.trans2);
}
+static void mtl_wait_ddi_buf_idle(struct drm_i915_private *i915, enum port port)
+{
+ int ret;
+
+ /* FIXME: find out why Bspec's 100us timeout is too short */
+ ret = wait_for_us((intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) &
+ XELPDP_PORT_BUF_PHY_IDLE), 10000);
+ if (ret)
+ drm_err(&i915->drm, "Timeout waiting for DDI BUF %c to get idle\n",
+ port_name(port));
+}
+
void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port)
{
@@ -196,7 +209,9 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
return;
}
- if (IS_DG2(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ timeout_us = 10000;
+ } else if (IS_DG2(dev_priv)) {
timeout_us = 1200;
} else if (DISPLAY_VER(dev_priv) >= 12) {
if (intel_phy_is_tc(dev_priv, phy))
@@ -207,8 +222,12 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
timeout_us = 500;
}
- ret = _wait_for(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
- DDI_BUF_IS_IDLE), timeout_us, 10, 10);
+ if (DISPLAY_VER(dev_priv) >= 14)
+ ret = _wait_for(!(intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_PORT_BUF_PHY_IDLE),
+ timeout_us, 10, 10);
+ else
+ ret = _wait_for(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE),
+ timeout_us, 10, 10);
if (ret)
drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get active\n",
@@ -313,6 +332,13 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
DDI_PORT_WIDTH(crtc_state->lane_count) |
DDI_BUF_TRANS_SELECT(0);
+ if (DISPLAY_VER(i915) >= 14) {
+ if (intel_dp_is_uhbr(crtc_state))
+ intel_dp->DP |= DDI_BUF_PORT_DATA_40BIT;
+ else
+ intel_dp->DP |= DDI_BUF_PORT_DATA_10BIT;
+ }
+
if (IS_ALDERLAKE_P(i915) && intel_phy_is_tc(i915, phy)) {
intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock);
if (!intel_tc_port_in_tbt_alt_mode(dig_port))
@@ -515,6 +541,8 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
temp |= TRANS_DDI_HDMI_SCRAMBLING;
if (crtc_state->hdmi_high_tmds_clock_ratio)
temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
+ if (DISPLAY_VER(dev_priv) >= 14)
+ temp |= TRANS_DDI_PORT_WIDTH(crtc_state->lane_count);
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
temp |= (crtc_state->fdi_lanes - 1) << 1;
@@ -2309,6 +2337,179 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
OVERLAP_PIXELS_MASK, dss1);
}
+static u8 mtl_get_port_width(u8 lane_count)
+{
+ switch (lane_count) {
+ case 1:
+ return 0;
+ case 2:
+ return 1;
+ case 3:
+ return 4;
+ case 4:
+ return 3;
+ default:
+ MISSING_CASE(lane_count);
+ return 4;
+ }
+}
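Note the deliberate swap in the last two cases: field value 3 encodes x4 and 4 encodes x3 (presumably the bspec PORT_WIDTH encoding, which appears to skip value 2 here). Both mtl_port_buf_ctl_program() and the DDI_BUF_CTL path below rely on the same mapping, which is why the helper is shared rather than open-coded.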
+
+static void
+mtl_ddi_enable_d2d(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port), 0,
+ XELPDP_PORT_BUF_D2D_LINK_ENABLE);
+
+ if (wait_for_us((intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) &
+ XELPDP_PORT_BUF_D2D_LINK_STATE), 100)) {
+ drm_err(&dev_priv->drm, "Timeout waiting for D2D Link enable for PORT_BUF_CTL %c\n",
+ port_name(port));
+ }
+}
+
+static void mtl_port_buf_ctl_program(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum port port = encoder->port;
+ u32 val;
+
+ val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
+ val &= ~XELPDP_PORT_WIDTH_MASK;
+ val |= XELPDP_PORT_WIDTH(mtl_get_port_width(crtc_state->lane_count));
+
+ val &= ~XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK;
+ if (intel_dp_is_uhbr(crtc_state))
+ val |= XELPDP_PORT_BUF_PORT_DATA_40BIT;
+ else
+ val |= XELPDP_PORT_BUF_PORT_DATA_10BIT;
+
+ if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL)
+ val |= XELPDP_PORT_REVERSAL;
+
+ intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
+}
+
+static void mtl_port_buf_ctl_io_selection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ u32 val;
+
+ val = intel_tc_port_in_tbt_alt_mode(dig_port) ?
+ XELPDP_PORT_BUF_IO_SELECT_TBT : 0;
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL1(encoder->port),
+ XELPDP_PORT_BUF_IO_SELECT_TBT, val);
+}
+
+static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+
+ intel_dp_set_link_params(intel_dp,
+ crtc_state->port_clock,
+ crtc_state->lane_count);
+
+ /*
+ * We only configure what the register value will be here. Actual
+ * enabling happens during link training farther down.
+ */
+ intel_ddi_init_dp_buf_reg(encoder, crtc_state);
+
+ /*
+ * 1. Enable Power Wells
+ *
+ * This was handled at the beginning of intel_atomic_commit_tail(),
+ * before we called down into this function.
+ */
+
+ /* 2. PMdemand was already set */
+
+ /* 3. Select Thunderbolt */
+ mtl_port_buf_ctl_io_selection(encoder);
+
+ /* 4. Enable Panel Power if PPS is required */
+ intel_pps_on(intel_dp);
+
+ /* 5. Enable the port PLL */
+ intel_ddi_enable_clock(encoder, crtc_state);
+
+ /*
+ * 6.a Configure Transcoder Clock Select to direct the Port clock to the
+ * Transcoder.
+ */
+ intel_ddi_enable_transcoder_clock(encoder, crtc_state);
+
+ /*
+ * 6.b If DP v2.0/128b mode - Configure TRANS_DP2_CTL register settings.
+ */
+ intel_ddi_config_transcoder_dp2(encoder, crtc_state);
+
+ /*
+ * 6.c Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
+ * Transport Select
+ */
+ intel_ddi_config_transcoder_func(encoder, crtc_state);
+
+ /*
+ * 6.e Program CoG/MSO configuration bits in DSS_CTL1 if selected.
+ */
+ intel_ddi_mso_configure(crtc_state);
+
+ if (!is_mst)
+ intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+
+ intel_dp_configure_protocol_converter(intel_dp, crtc_state);
+ intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
+ /*
+ * DDI FEC: a source that anticipates enabling FEC encoding sets the
+ * FEC_READY bit in the sink's FEC_CONFIGURATION register to 1 before
+ * initiating link training.
+ */
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
+
+ /*
+ * 6. The rest of the below are substeps under the bspec's "Enable and
+ * Train Display Port" step. Note that steps that are specific to
+ * MST will be handled by intel_mst_pre_enable_dp() before/after it
+ * calls into this function. Also intel_mst_pre_enable_dp() only calls
+ * us when active_mst_links==0, so any steps designated for "single
+ * stream or multi-stream master transcoder" can just be performed
+ * unconditionally here.
+ *
+ * mtl_ddi_prepare_link_retrain() that is called by
+ * intel_dp_start_link_train() will execute steps: 6.d, 6.f, 6.g, 6.h,
+ * 6.i and 6.j
+ *
+ * 6.k Follow DisplayPort specification training sequence (see notes for
+ * failure handling)
+ * 6.m If DisplayPort multi-stream - Set DP_TP_CTL link training to Idle
+ * Pattern, wait for 5 idle patterns (DP_TP_STATUS Min_Idles_Sent)
+ * (timeout after 800 us)
+ */
+ intel_dp_start_link_train(intel_dp, crtc_state);
+
+ /* 6.n Set DP_TP_CTL link training to Normal */
+ if (!is_trans_port_sync_mode(crtc_state))
+ intel_dp_stop_link_train(intel_dp, crtc_state);
+
+ /* 6.o Configure and enable FEC if needed */
+ intel_ddi_enable_fec(encoder, crtc_state);
+
+ intel_dsc_dp_pps_write(encoder, crtc_state);
+}
+
static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -2523,7 +2724,9 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder),
crtc_state);
- if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 14)
+ mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
+ else if (DISPLAY_VER(dev_priv) >= 12)
tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
else
hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
@@ -2604,8 +2807,50 @@ static void intel_ddi_pre_enable(struct intel_atomic_state *state,
}
}
-static void intel_disable_ddi_buf(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static void
+mtl_ddi_disable_d2d_link(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+
+ intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port),
+ XELPDP_PORT_BUF_D2D_LINK_ENABLE, 0);
+
+ if (wait_for_us(!(intel_de_read(dev_priv, XELPDP_PORT_BUF_CTL1(port)) &
+ XELPDP_PORT_BUF_D2D_LINK_STATE), 100))
+ drm_err(&dev_priv->drm, "Timeout waiting for D2D Link disable for PORT_BUF_CTL %c\n",
+ port_name(port));
+}
+
+static void mtl_disable_ddi_buf(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
+
+ /* 3.b Clear DDI_CTL_DE Enable to 0. */
+ val = intel_de_read(dev_priv, DDI_BUF_CTL(port));
+ if (val & DDI_BUF_CTL_ENABLE) {
+ val &= ~DDI_BUF_CTL_ENABLE;
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), val);
+
+ /* 3.c Poll for PORT_BUF_CTL Idle Status == 1, timeout after 100us */
+ mtl_wait_ddi_buf_idle(dev_priv, port);
+ }
+
+ /* 3.d Disable D2D Link */
+ mtl_ddi_disable_d2d_link(encoder);
+
+ /* 3.e Disable DP_TP_CTL */
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
+ DP_TP_CTL_ENABLE, 0);
+ }
+}
+
+static void disable_ddi_buf(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@@ -2630,6 +2875,21 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
intel_wait_ddi_buf_idle(dev_priv, port);
}
+static void intel_disable_ddi_buf(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ mtl_disable_ddi_buf(encoder, crtc_state);
+
+ /* 3.f Disable DP_TP_CTL FEC Enable if it is needed */
+ intel_ddi_disable_fec_state(encoder, crtc_state);
+ } else {
+ disable_ddi_buf(encoder, crtc_state);
+ }
+}
+
static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -2638,6 +2898,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &dig_port->dp;
+ intel_wakeref_t wakeref;
bool is_mst = intel_crtc_has_type(old_crtc_state,
INTEL_OUTPUT_DP_MST);
@@ -2677,12 +2938,19 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_pps_vdd_on(intel_dp);
intel_pps_off(intel_dp);
- if (!intel_tc_port_in_tbt_alt_mode(dig_port))
+ wakeref = fetch_and_zero(&dig_port->ddi_io_wakeref);
+
+ if (wakeref)
intel_display_power_put(dev_priv,
dig_port->ddi_io_power_domain,
- fetch_and_zero(&dig_port->ddi_io_wakeref));
+ wakeref);
intel_ddi_disable_clock(encoder);
+
+ /* De-select Thunderbolt */
+ if (DISPLAY_VER(dev_priv) >= 14)
+ intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(encoder->port),
+ XELPDP_PORT_BUF_IO_SELECT_TBT, 0);
}
static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
@@ -2693,6 +2961,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
+ intel_wakeref_t wakeref;
dig_port->set_infoframes(encoder, false,
old_crtc_state, old_conn_state);
@@ -2705,9 +2974,11 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) >= 12)
intel_ddi_disable_transcoder_clock(old_crtc_state);
- intel_display_power_put(dev_priv,
- dig_port->ddi_io_power_domain,
- fetch_and_zero(&dig_port->ddi_io_wakeref));
+ wakeref = fetch_and_zero(&dig_port->ddi_io_wakeref);
+ if (wakeref)
+ intel_display_power_put(dev_priv,
+ dig_port->ddi_io_power_domain,
+ wakeref);
intel_ddi_disable_clock(encoder);
@@ -2725,8 +2996,6 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
intel_crtc_vblank_off(old_crtc_state);
- intel_vrr_disable(old_crtc_state);
-
intel_disable_transcoder(old_crtc_state);
intel_ddi_disable_transcoder_func(old_crtc_state);
@@ -2890,6 +3159,10 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
if (has_buf_trans_select(dev_priv))
hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state);
+ /* e. Enable D2D Link for C10/C20 Phy */
+ if (DISPLAY_VER(dev_priv) >= 14)
+ mtl_ddi_enable_d2d(encoder);
+
encoder->set_signal_levels(encoder, crtc_state);
/* Display WA #1143: skl,kbl,cfl */
@@ -2935,12 +3208,30 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
*
* On ADL_P the PHY link rate and lane count must be programmed but
* these are both 0 for HDMI.
+ *
+ * From MTL onwards HDMI 2.1 is supported, and in TMDS mode this
+ * field is filled with the lane count already set in the crtc_state.
+ * The same value must also be written to PORT_BUF_CTL for the C10/C20 PHY.
*/
buf_ctl = dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE;
- if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) {
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ u8 lane_count = mtl_get_port_width(crtc_state->lane_count);
+ u32 port_buf = 0;
+
+ port_buf |= XELPDP_PORT_WIDTH(lane_count);
+
+ if (dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL)
+ port_buf |= XELPDP_PORT_REVERSAL;
+
+ intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(port),
+ XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf);
+
+ buf_ctl |= DDI_PORT_WIDTH(lane_count);
+ } else if (IS_ALDERLAKE_P(dev_priv) && intel_phy_is_tc(dev_priv, phy)) {
drm_WARN_ON(&dev_priv->drm, !intel_tc_port_in_legacy_mode(dig_port));
buf_ctl |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
+
intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
intel_wait_ddi_buf_active(dev_priv, port);
@@ -2963,8 +3254,6 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
intel_enable_transcoder(crtc_state);
- intel_vrr_enable(encoder, crtc_state);
-
intel_crtc_vblank_on(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -3070,7 +3359,8 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
struct intel_crtc *slave_crtc;
enum phy phy = intel_port_to_phy(i915, encoder->port);
- if (!intel_phy_is_tc(i915, phy))
+ /* FIXME: Add MTL pll_mgr */
+ if (DISPLAY_VER(i915) >= 14 || !intel_phy_is_tc(i915, phy))
return;
intel_update_active_dpll(state, crtc, encoder);
@@ -3121,6 +3411,53 @@ static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder)
intel_dkl_phy_rmw(i915, DKL_PCS_DW5(tc_port, ln), DKL_PCS_DW5_CORE_SOFTRESET, 0);
}
+static void mtl_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 dp_tp_ctl;
+
+ /*
+ * TODO: Disabling and re-enabling the port should not be necessary
+ * when retraining with only a different voltage swing entry.
+ */
+ dp_tp_ctl = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+ if (dp_tp_ctl & DP_TP_CTL_ENABLE)
+ mtl_disable_ddi_buf(encoder, crtc_state);
+
+ /* 6.d Configure and enable DP_TP_CTL with link training pattern 1 selected */
+ dp_tp_ctl = DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+ dp_tp_ctl |= DP_TP_CTL_MODE_MST;
+ } else {
+ dp_tp_ctl |= DP_TP_CTL_MODE_SST;
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ }
+ intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl);
+ intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
+
+ /* 6.f Enable D2D Link */
+ mtl_ddi_enable_d2d(encoder);
+
+ /* 6.g Configure voltage swing and related IO settings */
+ encoder->set_signal_levels(encoder, crtc_state);
+
+ /* 6.h Configure PORT_BUF_CTL1 */
+ mtl_port_buf_ctl_program(encoder, crtc_state);
+
+ /* 6.i Configure and enable DDI_CTL_DE to start sending valid data to port slice */
+ intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
+
+ /* 6.j Poll for PORT_BUF_CTL Idle Status == 0, timeout after 100 us */
+ intel_wait_ddi_buf_active(dev_priv, port);
+}
+
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -3369,7 +3706,11 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
fallthrough;
case TRANS_DDI_MODE_SELECT_DVI:
pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
- pipe_config->lane_count = 4;
+ if (DISPLAY_VER(dev_priv) >= 14)
+ pipe_config->lane_count =
+ ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+ else
+ pipe_config->lane_count = 4;
break;
case TRANS_DDI_MODE_SELECT_DP_SST:
if (encoder->type == INTEL_OUTPUT_EDP)
@@ -3506,6 +3847,28 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
&crtc_state->dpll_hw_state);
}
+static void mtl_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
+ crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
+ } else if (intel_is_c10phy(i915, phy)) {
+ intel_c10pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c10);
+ intel_c10pll_dump_hw_state(i915, &crtc_state->cx0pll_state.c10);
+ crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10);
+ } else {
+ intel_c20pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c20);
+ intel_c20pll_dump_hw_state(i915, &crtc_state->cx0pll_state.c20);
+ crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20);
+ }
+
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
static void dg2_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
@@ -3700,6 +4063,9 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) {
+ pipe_config->has_hdmi_sink =
+ intel_hdmi_compute_has_hdmi_sink(encoder, pipe_config, conn_state);
+
ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
} else {
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
@@ -3873,6 +4239,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
static struct intel_connector *
intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_connector *connector;
enum port port = dig_port->base.port;
@@ -3881,7 +4248,10 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
return NULL;
dig_port->dp.output_reg = DDI_BUF_CTL(port);
- dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain;
+ if (DISPLAY_VER(i915) >= 14)
+ dig_port->dp.prepare_link_retrain = mtl_ddi_prepare_link_retrain;
+ else
+ dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain;
dig_port->dp.set_link_train = intel_ddi_set_link_train;
dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
@@ -3921,6 +4291,7 @@ static int modeset_pipe(struct drm_crtc *crtc,
return -ENOMEM;
state->acquire_ctx = ctx;
+ to_intel_atomic_state(state)->internal = true;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
@@ -4413,7 +4784,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->cloneable = 0;
encoder->pipe_mask = ~0;
- if (IS_DG2(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ encoder->enable_clock = intel_mtl_pll_enable;
+ encoder->disable_clock = intel_mtl_pll_disable;
+ encoder->port_pll_type = intel_mtl_port_pll_type;
+ encoder->get_config = mtl_ddi_get_config;
+ } else if (IS_DG2(dev_priv)) {
encoder->enable_clock = intel_mpllb_enable;
encoder->disable_clock = intel_mpllb_disable;
encoder->get_config = dg2_ddi_get_config;
@@ -4473,7 +4849,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->get_config = hsw_ddi_get_config;
}
- if (IS_DG2(dev_priv)) {
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ encoder->set_signal_levels = intel_cx0_phy_set_signal_levels;
+ } else if (IS_DG2(dev_priv)) {
encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
} else if (DISPLAY_VER(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
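The HDMI/DVI readout added above decodes the DDI_BUF_CTL port width field as lane_count - 1, while the enable path programs it via mtl_get_port_width(), whose mapping is not shown in this diff. Below is a minimal standalone sketch of the round trip implied by the readout, assuming a 3-bit field at bit 1 (the real DDI_PORT_WIDTH_MASK/SHIFT definitions live in i915_reg.h and may differ):

#include <assert.h>
#include <stdint.h>

/* Assumed field layout; the real values are in i915_reg.h. */
#define DDI_PORT_WIDTH_SHIFT	1
#define DDI_PORT_WIDTH_MASK	(0x7 << DDI_PORT_WIDTH_SHIFT)
#define DDI_PORT_WIDTH(lanes)	(((lanes) - 1) << DDI_PORT_WIDTH_SHIFT)

static int decode_lane_count(uint32_t temp)
{
	/* Mirrors the TRANS_DDI_MODE_SELECT_DVI readout in the hunk above. */
	return ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
}

int main(void)
{
	for (int lanes = 1; lanes <= 4; lanes++)
		assert(decode_lane_count(DDI_PORT_WIDTH(lanes)) == lanes);
	return 0;
}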
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 006a2e979000..b7d20485bde5 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -9,6 +9,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_cx0_phy.h"
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
* them for both DP and FDI transports, allowing those ports to
@@ -1035,6 +1036,65 @@ static const struct intel_ddi_buf_trans dg2_snps_trans_uhbr = {
.num_entries = ARRAY_SIZE(_dg2_snps_trans_uhbr),
};
+static const union intel_ddi_buf_trans_entry _mtl_c10_trans_dp14[] = {
+ { .snps = { 26, 0, 0 } }, /* preset 0 */
+ { .snps = { 33, 0, 6 } }, /* preset 1 */
+ { .snps = { 38, 0, 11 } }, /* preset 2 */
+ { .snps = { 43, 0, 19 } }, /* preset 3 */
+ { .snps = { 39, 0, 0 } }, /* preset 4 */
+ { .snps = { 45, 0, 7 } }, /* preset 5 */
+ { .snps = { 46, 0, 13 } }, /* preset 6 */
+ { .snps = { 46, 0, 0 } }, /* preset 7 */
+ { .snps = { 55, 0, 7 } }, /* preset 8 */
+ { .snps = { 62, 0, 0 } }, /* preset 9 */
+};
+
+static const struct intel_ddi_buf_trans mtl_cx0_trans = {
+ .entries = _mtl_c10_trans_dp14,
+ .num_entries = ARRAY_SIZE(_mtl_c10_trans_dp14),
+ .hdmi_default_entry = ARRAY_SIZE(_mtl_c10_trans_dp14) - 1,
+};
+
+/* DP2.0 */
+static const union intel_ddi_buf_trans_entry _mtl_c20_trans_uhbr[] = {
+ { .snps = { 48, 0, 0 } }, /* preset 0 */
+ { .snps = { 43, 0, 5 } }, /* preset 1 */
+ { .snps = { 40, 0, 8 } }, /* preset 2 */
+ { .snps = { 37, 0, 11 } }, /* preset 3 */
+ { .snps = { 33, 0, 15 } }, /* preset 4 */
+ { .snps = { 46, 2, 0 } }, /* preset 5 */
+ { .snps = { 42, 2, 4 } }, /* preset 6 */
+ { .snps = { 38, 2, 8 } }, /* preset 7 */
+ { .snps = { 35, 2, 11 } }, /* preset 8 */
+ { .snps = { 33, 2, 13 } }, /* preset 9 */
+ { .snps = { 44, 4, 0 } }, /* preset 10 */
+ { .snps = { 40, 4, 4 } }, /* preset 11 */
+ { .snps = { 37, 4, 7 } }, /* preset 12 */
+ { .snps = { 33, 4, 11 } }, /* preset 13 */
+ { .snps = { 40, 8, 0 } }, /* preset 14 */
+ { .snps = { 28, 2, 2 } }, /* preset 15 */
+};
+
+/* HDMI2.0 */
+static const union intel_ddi_buf_trans_entry _mtl_c20_trans_hdmi[] = {
+ { .snps = { 48, 0, 0 } }, /* preset 0 */
+ { .snps = { 38, 4, 6 } }, /* preset 1 */
+ { .snps = { 36, 4, 8 } }, /* preset 2 */
+ { .snps = { 34, 4, 10 } }, /* preset 3 */
+ { .snps = { 32, 4, 12 } }, /* preset 4 */
+};
+
+static const struct intel_ddi_buf_trans mtl_c20_trans_hdmi = {
+ .entries = _mtl_c20_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_mtl_c20_trans_hdmi),
+ .hdmi_default_entry = 0,
+};
+
+static const struct intel_ddi_buf_trans mtl_c20_trans_uhbr = {
+ .entries = _mtl_c20_trans_uhbr,
+ .num_entries = ARRAY_SIZE(_mtl_c20_trans_uhbr),
+};
+
bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table)
{
return table == &tgl_combo_phy_trans_edp_hbr2_hobl;
@@ -1606,12 +1666,30 @@ dg2_get_snps_buf_trans(struct intel_encoder *encoder,
return intel_get_buf_trans(&dg2_snps_trans, n_entries);
}
+static const struct intel_ddi_buf_trans *
+mtl_get_cx0_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_crtc_has_dp_encoder(crtc_state) && crtc_state->port_clock >= 1000000)
+ return intel_get_buf_trans(&mtl_c20_trans_uhbr, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !(intel_is_c10phy(i915, phy)))
+ return intel_get_buf_trans(&mtl_c20_trans_hdmi, n_entries);
+ else
+ return intel_get_buf_trans(&mtl_cx0_trans, n_entries);
+}
+
void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_port_to_phy(i915, encoder->port);
- if (IS_DG2(i915)) {
+ if (DISPLAY_VER(i915) >= 14) {
+ encoder->get_buf_trans = mtl_get_cx0_buf_trans;
+ } else if (IS_DG2(i915)) {
encoder->get_buf_trans = dg2_get_snps_buf_trans;
} else if (IS_ALDERLAKE_P(i915)) {
if (intel_phy_is_combo(i915, phy))
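mtl_get_cx0_buf_trans() above selects between the three new tables. The same decision reduced to a standalone function, with the driver state collapsed into plain booleans (the names here are illustrative, not the i915 API); port_clock is in kHz, so 1000000 is the lowest DP 2.0 (UHBR10) link rate:

#include <stdbool.h>
#include <stdio.h>

enum mtl_trans_table { MTL_CX0_DP14, MTL_C20_UHBR, MTL_C20_HDMI };

static enum mtl_trans_table pick_table(bool is_dp, bool is_hdmi,
				       bool is_c10phy, int port_clock)
{
	/* UHBR rates are only reachable on DP with a C20 PHY clock. */
	if (is_dp && port_clock >= 1000000)
		return MTL_C20_UHBR;
	else if (is_hdmi && !is_c10phy)
		return MTL_C20_HDMI;
	else
		return MTL_CX0_DP14;
}

int main(void)
{
	/* DP at UHBR10 uses the dedicated UHBR table. */
	printf("%d\n", pick_table(true, false, false, 1000000));
	/* HDMI on a C20 PHY uses the HDMI table. */
	printf("%d\n", pick_table(false, true, false, 594000));
	/* Everything else, including HDMI on C10, falls back to DP 1.4. */
	printf("%d\n", pick_table(false, true, true, 594000));
	return 0;
}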
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 3c29792137a5..1d5d42a40803 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -31,8 +31,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
-#include <linux/vga_switcheroo.h>
-#include <acpi/video.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
@@ -41,7 +39,6 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
@@ -57,7 +54,6 @@
#include "i9xx_plane.h"
#include "i9xx_wm.h"
#include "icl_dsi.h"
-#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_audio.h"
@@ -70,7 +66,7 @@
#include "intel_crtc_state_dump.h"
#include "intel_ddi.h"
#include "intel_de.h"
-#include "intel_display_debugfs.h"
+#include "intel_display_driver.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
@@ -90,11 +86,8 @@
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
-#include "intel_gmbus.h"
-#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
-#include "intel_hti.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
@@ -108,7 +101,6 @@
#include "intel_plane_initial.h"
#include "intel_pps.h"
#include "intel_psr.h"
-#include "intel_quirks.h"
#include "intel_sdvo.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
@@ -131,7 +123,6 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state);
-static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
@@ -178,7 +169,7 @@ int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
return hpll;
}
-static void intel_update_czclk(struct drm_i915_private *dev_priv)
+void intel_update_czclk(struct drm_i915_private *dev_priv)
{
if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
return;
@@ -701,175 +692,6 @@ intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
return y;
}
-static int
-intel_display_commit_duplicated_state(struct intel_atomic_state *state,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- int ret;
-
- ret = drm_atomic_helper_commit_duplicated_state(&state->base, ctx);
-
- drm_WARN_ON(&i915->drm, ret == -EDEADLK);
-
- return ret;
-}
-
-static int
-__intel_display_resume(struct drm_i915_private *i915,
- struct drm_atomic_state *state,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_crtc_state *crtc_state;
- struct drm_crtc *crtc;
- int i;
-
- intel_modeset_setup_hw_state(i915, ctx);
- intel_vga_redisable(i915);
-
- if (!state)
- return 0;
-
- /*
- * We've duplicated the state, pointers to the old state are invalid.
- *
- * Don't attempt to use the old state until we commit the duplicated state.
- */
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- /*
- * Force recalculation even if we restore
- * current state. With fast modeset this may not result
- * in a modeset when the state is compatible.
- */
- crtc_state->mode_changed = true;
- }
-
- /* ignore any reset values/BIOS leftovers in the WM registers */
- if (!HAS_GMCH(i915))
- to_intel_atomic_state(state)->skip_intermediate_wm = true;
-
- return intel_display_commit_duplicated_state(to_intel_atomic_state(state), ctx);
-}
-
-static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
-{
- return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
- intel_has_gpu_reset(to_gt(dev_priv)));
-}
-
-void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
-{
- struct drm_modeset_acquire_ctx *ctx = &dev_priv->display.restore.reset_ctx;
- struct drm_atomic_state *state;
- int ret;
-
- if (!HAS_DISPLAY(dev_priv))
- return;
-
- /* reset doesn't touch the display */
- if (!dev_priv->params.force_reset_modeset_test &&
- !gpu_reset_clobbers_display(dev_priv))
- return;
-
- /* We have a modeset vs reset deadlock, defensively unbreak it. */
- set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
- smp_mb__after_atomic();
- wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);
-
- if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
- drm_dbg_kms(&dev_priv->drm,
- "Modeset potentially stuck, unbreaking through wedging\n");
- intel_gt_set_wedged(to_gt(dev_priv));
- }
-
- /*
- * Need mode_config.mutex so that we don't
- * trample ongoing ->detect() and whatnot.
- */
- mutex_lock(&dev_priv->drm.mode_config.mutex);
- drm_modeset_acquire_init(ctx, 0);
- while (1) {
- ret = drm_modeset_lock_all_ctx(&dev_priv->drm, ctx);
- if (ret != -EDEADLK)
- break;
-
- drm_modeset_backoff(ctx);
- }
- /*
- * Disabling the crtcs gracefully seems nicer. Also the
- * g33 docs say we should at least disable all the planes.
- */
- state = drm_atomic_helper_duplicate_state(&dev_priv->drm, ctx);
- if (IS_ERR(state)) {
- ret = PTR_ERR(state);
- drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
- ret);
- return;
- }
-
- ret = drm_atomic_helper_disable_all(&dev_priv->drm, ctx);
- if (ret) {
- drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
- ret);
- drm_atomic_state_put(state);
- return;
- }
-
- dev_priv->display.restore.modeset_state = state;
- state->acquire_ctx = ctx;
-}
-
-void intel_display_finish_reset(struct drm_i915_private *i915)
-{
- struct drm_modeset_acquire_ctx *ctx = &i915->display.restore.reset_ctx;
- struct drm_atomic_state *state;
- int ret;
-
- if (!HAS_DISPLAY(i915))
- return;
-
- /* reset doesn't touch the display */
- if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags))
- return;
-
- state = fetch_and_zero(&i915->display.restore.modeset_state);
- if (!state)
- goto unlock;
-
- /* reset doesn't touch the display */
- if (!gpu_reset_clobbers_display(i915)) {
- /* for testing only restore the display */
- ret = intel_display_commit_duplicated_state(to_intel_atomic_state(state), ctx);
- if (ret)
- drm_err(&i915->drm,
- "Restoring old state failed with %i\n", ret);
- } else {
- /*
- * The display has been reset as well,
- * so need a full re-initialization.
- */
- intel_pps_unlock_regs_wa(i915);
- intel_modeset_init_hw(i915);
- intel_clock_gating_init(i915);
- intel_hpd_init(i915);
-
- ret = __intel_display_resume(i915, state, ctx);
- if (ret)
- drm_err(&i915->drm,
- "Restoring old state failed with %i\n", ret);
-
- intel_hpd_poll_disable(i915);
- }
-
- drm_atomic_state_put(state);
-unlock:
- drm_modeset_drop_locks(ctx);
- drm_modeset_acquire_fini(ctx);
- mutex_unlock(&i915->drm.mode_config.mutex);
-
- clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags);
-}
-
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -990,8 +812,10 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
else
intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
PF_FILTER_MED_3x3);
- intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
- intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
+ intel_de_write_fw(dev_priv, PF_WIN_POS(pipe),
+ PF_WIN_XPOS(x) | PF_WIN_YPOS(y));
+ intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe),
+ PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height));
}
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
@@ -1069,20 +893,40 @@ static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}
+#define is_enabling(feature, old_crtc_state, new_crtc_state) \
+ ((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
+ (new_crtc_state)->feature)
+#define is_disabling(feature, old_crtc_state, new_crtc_state) \
+ ((old_crtc_state)->feature && \
+ (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)))
+
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
- return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
- new_crtc_state->active_planes;
+ return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}
static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
- return old_crtc_state->active_planes &&
- (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
+ return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}
+static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ return is_enabling(vrr.enable, old_crtc_state, new_crtc_state);
+}
+
+static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ return is_disabling(vrr.enable, old_crtc_state, new_crtc_state);
+}
+
+#undef is_disabling
+#undef is_enabling
+
static void intel_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -1196,6 +1040,11 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
+ if (vrr_disabling(old_crtc_state, new_crtc_state)) {
+ intel_vrr_disable(old_crtc_state);
+ intel_crtc_update_active_timings(old_crtc_state, false);
+ }
+
intel_drrs_deactivate(old_crtc_state);
intel_psr_pre_plane_update(state, crtc);
@@ -1676,6 +1525,8 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta
}
intel_set_transcoder_timings(crtc_state);
+ if (HAS_VRR(dev_priv))
+ intel_vrr_set_transcoder_timings(crtc_state);
if (cpu_transcoder != TRANSCODER_EDP)
intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder),
@@ -1907,7 +1758,7 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
if (IS_DG2(dev_priv))
/* DG2's "TC1" output uses a SNPS PHY */
return false;
- else if (IS_ALDERLAKE_P(dev_priv))
+ else if (IS_ALDERLAKE_P(dev_priv) || IS_METEORLAKE(dev_priv))
return phy >= PHY_F && phy <= PHY_I;
else if (IS_TIGERLAKE(dev_priv))
return phy >= PHY_D && phy <= PHY_I;
@@ -2214,30 +2065,6 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
i830_enable_pipe(dev_priv, pipe);
}
-
-/*
- * turn all crtc's off, but do not adjust state
- * This has to be paired with a call to intel_modeset_setup_hw_state.
- */
-int intel_display_suspend(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *state;
- int ret;
-
- if (!HAS_DISPLAY(dev_priv))
- return 0;
-
- state = drm_atomic_helper_suspend(dev);
- ret = PTR_ERR_OR_ZERO(state);
- if (ret)
- drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
- ret);
- else
- dev_priv->display.restore.modeset_state = state;
- return ret;
-}
-
void intel_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
@@ -2567,7 +2394,7 @@ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
0x80000);
}
-static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
+void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
/*
* There may be no VBT; and if the BIOS enabled SSC we can
@@ -2923,6 +2750,7 @@ static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe;
u32 tmp;
if (!i9xx_has_pfit(dev_priv))
@@ -2933,13 +2761,13 @@ static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
return;
/* Check whether the pfit is attached to our pipe. */
- if (DISPLAY_VER(dev_priv) < 4) {
- if (crtc->pipe != PIPE_B)
- return;
- } else {
- if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
- return;
- }
+ if (DISPLAY_VER(dev_priv) >= 4)
+ pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp);
+ else
+ pipe = PIPE_B;
+
+ if (pipe != crtc->pipe)
+ return;
crtc_state->gmch_pfit.control = tmp;
crtc_state->gmch_pfit.pgm_ratios =
@@ -3061,6 +2889,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
return false;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ pipe_config->sink_format = pipe_config->output_format;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
@@ -3398,73 +3227,39 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
}
-static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
- u32 pos, u32 size)
-{
- drm_rect_init(&crtc_state->pch_pfit.dst,
- pos >> 16, pos & 0xffff,
- size >> 16, size & 0xffff);
-}
-
-static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
- int id = -1;
- int i;
-
- /* find scaler attached to this pipe */
- for (i = 0; i < crtc->num_scalers; i++) {
- u32 ctl, pos, size;
-
- ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
- if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
- continue;
-
- id = i;
- crtc_state->pch_pfit.enabled = true;
-
- pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
- size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
-
- ilk_get_pfit_pos_size(crtc_state, pos, size);
-
- scaler_state->scalers[i].in_use = true;
- break;
- }
-
- scaler_state->scaler_id = id;
- if (id >= 0)
- scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
- else
- scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
-}
-
static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 ctl, pos, size;
+ enum pipe pipe;
ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
if ((ctl & PF_ENABLE) == 0)
return;
+ if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
+ pipe = REG_FIELD_GET(PF_PIPE_SEL_MASK_IVB, ctl);
+ else
+ pipe = crtc->pipe;
+
crtc_state->pch_pfit.enabled = true;
pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
- ilk_get_pfit_pos_size(crtc_state, pos, size);
+ drm_rect_init(&crtc_state->pch_pfit.dst,
+ REG_FIELD_GET(PF_WIN_XPOS_MASK, pos),
+ REG_FIELD_GET(PF_WIN_YPOS_MASK, pos),
+ REG_FIELD_GET(PF_WIN_XSIZE_MASK, size),
+ REG_FIELD_GET(PF_WIN_YSIZE_MASK, size));
/*
* We currently do not free assignments of panel fitters on
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiate them) so just WARN about this case for now.
*/
- drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 7 &&
- (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
+ drm_WARN_ON(&dev_priv->drm, pipe != crtc->pipe);
}
static bool ilk_get_pipe_config(struct intel_crtc *crtc,
@@ -3520,6 +3315,8 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
break;
}
+ pipe_config->sink_format = pipe_config->output_format;
+
pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp);
pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1;
@@ -3901,7 +3698,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_get_transcoder_timings(crtc, pipe_config);
if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
- intel_vrr_get_config(crtc, pipe_config);
+ intel_vrr_get_config(pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
@@ -3918,6 +3715,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
bdw_get_pipe_misc_output_format(crtc);
}
+ pipe_config->sink_format = pipe_config->output_format;
+
pipe_config->gamma_mode = intel_de_read(dev_priv,
GAMMA_MODE(crtc->pipe));
@@ -3947,7 +3746,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (intel_display_power_get_in_set_if_enabled(dev_priv, &crtc->hw_readout_power_domains,
POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
if (DISPLAY_VER(dev_priv) >= 9)
- skl_get_pfit_config(pipe_config);
+ skl_scaler_get_config(pipe_config);
else
ilk_get_pfit_config(pipe_config);
}
@@ -3995,218 +3794,6 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
return true;
}
-/* VESA 640x480x72Hz mode to set on the pipe */
-static const struct drm_display_mode load_detect_mode = {
- DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
- 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
-};
-
-static int intel_modeset_disable_planes(struct drm_atomic_state *state,
- struct drm_crtc *crtc)
-{
- struct drm_plane *plane;
- struct drm_plane_state *plane_state;
- int ret, i;
-
- ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
- return ret;
-
- for_each_new_plane_in_state(state, plane, plane_state, i) {
- if (plane_state->crtc != crtc)
- continue;
-
- ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
- if (ret)
- return ret;
-
- drm_atomic_set_fb_for_plane(plane_state, NULL);
- }
-
- return 0;
-}
-
-int intel_get_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct intel_encoder *encoder =
- intel_attached_encoder(to_intel_connector(connector));
- struct intel_crtc *possible_crtc;
- struct intel_crtc *crtc = NULL;
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_mode_config *config = &dev->mode_config;
- struct drm_atomic_state *state = NULL, *restore_state = NULL;
- struct drm_connector_state *connector_state;
- struct intel_crtc_state *crtc_state;
- int ret;
-
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id, connector->name,
- encoder->base.base.id, encoder->base.name);
-
- old->restore_state = NULL;
-
- drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
-
- /*
- * Algorithm gets a little messy:
- *
- * - if the connector already has an assigned crtc, use it (but make
- * sure it's on first)
- *
- * - try to find the first unused crtc that can drive this connector,
- * and use that if we find one
- */
-
- /* See if we already have a CRTC for this connector */
- if (connector->state->crtc) {
- crtc = to_intel_crtc(connector->state->crtc);
-
- ret = drm_modeset_lock(&crtc->base.mutex, ctx);
- if (ret)
- goto fail;
-
- /* Make sure the crtc and connector are running */
- goto found;
- }
-
- /* Find an unused one (if possible) */
- for_each_intel_crtc(dev, possible_crtc) {
- if (!(encoder->base.possible_crtcs &
- drm_crtc_mask(&possible_crtc->base)))
- continue;
-
- ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
- if (ret)
- goto fail;
-
- if (possible_crtc->base.state->enable) {
- drm_modeset_unlock(&possible_crtc->base.mutex);
- continue;
- }
-
- crtc = possible_crtc;
- break;
- }
-
- /*
- * If we didn't find an unused CRTC, don't use any.
- */
- if (!crtc) {
- drm_dbg_kms(&dev_priv->drm,
- "no pipe available for load-detect\n");
- ret = -ENODEV;
- goto fail;
- }
-
-found:
- state = drm_atomic_state_alloc(dev);
- restore_state = drm_atomic_state_alloc(dev);
- if (!state || !restore_state) {
- ret = -ENOMEM;
- goto fail;
- }
-
- state->acquire_ctx = ctx;
- restore_state->acquire_ctx = ctx;
-
- connector_state = drm_atomic_get_connector_state(state, connector);
- if (IS_ERR(connector_state)) {
- ret = PTR_ERR(connector_state);
- goto fail;
- }
-
- ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
- if (ret)
- goto fail;
-
- crtc_state = intel_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- goto fail;
- }
-
- crtc_state->uapi.active = true;
-
- ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
- &load_detect_mode);
- if (ret)
- goto fail;
-
- ret = intel_modeset_disable_planes(state, &crtc->base);
- if (ret)
- goto fail;
-
- ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
- if (!ret)
- ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
- if (!ret)
- ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "Failed to create a copy of old state to restore: %i\n",
- ret);
- goto fail;
- }
-
- ret = drm_atomic_commit(state);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to set mode on load-detect pipe\n");
- goto fail;
- }
-
- old->restore_state = restore_state;
- drm_atomic_state_put(state);
-
- /* let the connector get through one full cycle before testing */
- intel_crtc_wait_for_next_vblank(crtc);
-
- return true;
-
-fail:
- if (state) {
- drm_atomic_state_put(state);
- state = NULL;
- }
- if (restore_state) {
- drm_atomic_state_put(restore_state);
- restore_state = NULL;
- }
-
- if (ret == -EDEADLK)
- return ret;
-
- return false;
-}
-
-void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct intel_encoder *intel_encoder =
- intel_attached_encoder(to_intel_connector(connector));
- struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_atomic_state *state = old->restore_state;
- int ret;
-
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id, connector->name,
- encoder->base.id, encoder->name);
-
- if (!state)
- return;
-
- ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
- if (ret)
- drm_dbg_kms(&i915->drm,
- "Couldn't release load detect pipe: %i\n", ret);
- drm_atomic_state_put(state);
-}
-
static int i9xx_pll_refclk(struct drm_device *dev,
const struct intel_crtc_state *pipe_config)
{
@@ -5573,6 +5160,24 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
+#define PIPE_CONF_CHECK_CSC(name) do { \
+ PIPE_CONF_CHECK_X(name.preoff[0]); \
+ PIPE_CONF_CHECK_X(name.preoff[1]); \
+ PIPE_CONF_CHECK_X(name.preoff[2]); \
+ PIPE_CONF_CHECK_X(name.coeff[0]); \
+ PIPE_CONF_CHECK_X(name.coeff[1]); \
+ PIPE_CONF_CHECK_X(name.coeff[2]); \
+ PIPE_CONF_CHECK_X(name.coeff[3]); \
+ PIPE_CONF_CHECK_X(name.coeff[4]); \
+ PIPE_CONF_CHECK_X(name.coeff[5]); \
+ PIPE_CONF_CHECK_X(name.coeff[6]); \
+ PIPE_CONF_CHECK_X(name.coeff[7]); \
+ PIPE_CONF_CHECK_X(name.coeff[8]); \
+ PIPE_CONF_CHECK_X(name.postoff[0]); \
+ PIPE_CONF_CHECK_X(name.postoff[1]); \
+ PIPE_CONF_CHECK_X(name.postoff[2]); \
+} while (0)
+
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
@@ -5670,6 +5275,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true);
PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false);
+ PIPE_CONF_CHECK_CSC(csc);
+ PIPE_CONF_CHECK_CSC(output_csc);
+
if (current_config->active_planes) {
PIPE_CONF_CHECK_BOOL(has_psr);
PIPE_CONF_CHECK_BOOL(has_psr2);
@@ -5756,7 +5364,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(splitter.link_count);
PIPE_CONF_CHECK_I(splitter.pixel_overlap);
- PIPE_CONF_CHECK_BOOL(vrr.enable);
+ if (!fastset)
+ PIPE_CONF_CHECK_BOOL(vrr.enable);
PIPE_CONF_CHECK_I(vrr.vmin);
PIPE_CONF_CHECK_I(vrr.vmax);
PIPE_CONF_CHECK_I(vrr.flipline);
@@ -6536,6 +6145,13 @@ int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
+ /*
+	 * The crtc's state is no longer considered to be inherited
+	 * after the first userspace/client-initiated commit.
+ */
+ if (!state->internal)
+ new_crtc_state->inherited = false;
+
if (new_crtc_state->inherited != old_crtc_state->inherited)
new_crtc_state->uapi.mode_changed = true;
@@ -6881,7 +6497,8 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
if (!intel_crtc_needs_modeset(new_crtc_state))
return;
- intel_crtc_update_active_timings(new_crtc_state);
+	/* VRR will be enabled later, if required */
+ intel_crtc_update_active_timings(new_crtc_state, false);
dev_priv->display.funcs.display->crtc_enable(state, crtc);
@@ -6908,6 +6525,12 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_dpt_configure(crtc);
}
+ if (vrr_enabling(old_crtc_state, new_crtc_state)) {
+ intel_vrr_enable(new_crtc_state);
+ intel_crtc_update_active_timings(new_crtc_state,
+ new_crtc_state->vrr.enable);
+ }
+
if (!modeset) {
if (new_crtc_state->preload_luts &&
intel_crtc_needs_color_update(new_crtc_state))
@@ -7181,7 +6804,7 @@ static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
drm_atomic_state_put(&state->base);
}
-static void intel_atomic_helper_free_state_worker(struct work_struct *work)
+void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), display.atomic_helper.free_work);
@@ -7313,6 +6936,12 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* 7. New _arm() registers are finally written
* 8. Hardware finally latches a complete set of new
* register values, and subsequent frames will be OK again
+ *
+ * Also note that due to the pipe CSC hardware issues on
+	 * SKL/GLK, DC states must remain off until the pipe CSC
+ * state readout has happened. Otherwise we risk corrupting
+ * the CSC latched register values with the readout (see
+ * skl_read_csc() and skl_color_commit_noarm()).
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
@@ -7523,9 +7152,8 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state)
plane->frontbuffer_bit);
}
-static int intel_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *_state,
- bool nonblock)
+int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
+ bool nonblock)
{
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -7627,19 +7255,6 @@ void intel_plane_destroy(struct drm_plane *plane)
kfree(to_intel_plane(plane));
}
-static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
-{
- struct intel_plane *plane;
-
- for_each_intel_plane(&dev_priv->drm, plane) {
- struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
- plane->pipe);
-
- plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
- }
-}
-
-
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -7719,7 +7334,7 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
return true;
}
-static void intel_setup_outputs(struct drm_i915_private *dev_priv)
+void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
bool dpd_is_edp = false;
@@ -7729,7 +7344,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_DG2(dev_priv)) {
+ if (IS_METEORLAKE(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_TC1);
+ intel_ddi_init(dev_priv, PORT_TC2);
+ intel_ddi_init(dev_priv, PORT_TC3);
+ intel_ddi_init(dev_priv, PORT_TC4);
+ } else if (IS_DG2(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
@@ -7971,9 +7593,8 @@ static int max_dotclock(struct drm_i915_private *i915)
return max_dotclock;
}
-static enum drm_mode_status
-intel_mode_valid(struct drm_device *dev,
- const struct drm_display_mode *mode)
+enum drm_mode_status intel_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = to_i915(dev);
int hdisplay_max, htotal_max;
@@ -8113,18 +7734,6 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
return MODE_OK;
}
-static const struct drm_mode_config_funcs intel_mode_funcs = {
- .fb_create = intel_user_framebuffer_create,
- .get_format_info = intel_fb_get_format_info,
- .output_poll_changed = intel_fbdev_output_poll_changed,
- .mode_valid = intel_mode_valid,
- .atomic_check = intel_atomic_check,
- .atomic_commit = intel_atomic_commit,
- .atomic_state_alloc = intel_atomic_state_alloc,
- .atomic_state_clear = intel_atomic_state_clear,
- .atomic_state_free = intel_atomic_state_free,
-};
-
static const struct intel_display_funcs skl_display_funcs = {
.get_pipe_config = hsw_get_pipe_config,
.crtc_enable = hsw_crtc_enable,
@@ -8171,15 +7780,6 @@ static const struct intel_display_funcs i9xx_display_funcs = {
*/
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
- if (!HAS_DISPLAY(dev_priv))
- return;
-
- intel_color_init_hooks(dev_priv);
- intel_init_cdclk_hooks(dev_priv);
- intel_audio_hooks_init(dev_priv);
-
- intel_dpll_init_clock_hook(dev_priv);
-
if (DISPLAY_VER(dev_priv) >= 9) {
dev_priv->display.funcs.display = &skl_display_funcs;
} else if (HAS_DDI(dev_priv)) {
@@ -8192,25 +7792,9 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
} else {
dev_priv->display.funcs.display = &i9xx_display_funcs;
}
-
- intel_fdi_init_hook(dev_priv);
}
-void intel_modeset_init_hw(struct drm_i915_private *i915)
-{
- struct intel_cdclk_state *cdclk_state;
-
- if (!HAS_DISPLAY(i915))
- return;
-
- cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state);
-
- intel_update_cdclk(i915);
- intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK");
- cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw;
-}
-
-static int intel_initial_commit(struct drm_device *dev)
+int intel_initial_commit(struct drm_device *dev)
{
struct drm_atomic_state *state = NULL;
struct drm_modeset_acquire_ctx ctx;
@@ -8223,9 +7807,10 @@ static int intel_initial_commit(struct drm_device *dev)
drm_modeset_acquire_init(&ctx, 0);
-retry:
state->acquire_ctx = &ctx;
+ to_intel_atomic_state(state)->internal = true;
+retry:
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
intel_atomic_get_crtc_state(state, crtc);
@@ -8238,15 +7823,6 @@ retry:
if (crtc_state->hw.active) {
struct intel_encoder *encoder;
- /*
- * We've not yet detected sink capabilities
- * (audio,infoframes,etc.) and thus we don't want to
- * force a full state recomputation yet. We want that to
- * happen only for the first real commit from userspace.
- * So preserve the inherited flag for the time being.
- */
- crtc_state->inherited = true;
-
ret = drm_atomic_add_affected_planes(state, &crtc->base);
if (ret)
goto out;
@@ -8289,246 +7865,6 @@ out:
return ret;
}
-static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
- .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
-};
-
-static void intel_mode_config_init(struct drm_i915_private *i915)
-{
- struct drm_mode_config *mode_config = &i915->drm.mode_config;
-
- drm_mode_config_init(&i915->drm);
- INIT_LIST_HEAD(&i915->display.global.obj_list);
-
- mode_config->min_width = 0;
- mode_config->min_height = 0;
-
- mode_config->preferred_depth = 24;
- mode_config->prefer_shadow = 1;
-
- mode_config->funcs = &intel_mode_funcs;
- mode_config->helper_private = &intel_mode_config_funcs;
-
- mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
-
- /*
- * Maximum framebuffer dimensions, chosen to match
- * the maximum render engine surface size on gen4+.
- */
- if (DISPLAY_VER(i915) >= 7) {
- mode_config->max_width = 16384;
- mode_config->max_height = 16384;
- } else if (DISPLAY_VER(i915) >= 4) {
- mode_config->max_width = 8192;
- mode_config->max_height = 8192;
- } else if (DISPLAY_VER(i915) == 3) {
- mode_config->max_width = 4096;
- mode_config->max_height = 4096;
- } else {
- mode_config->max_width = 2048;
- mode_config->max_height = 2048;
- }
-
- if (IS_I845G(i915) || IS_I865G(i915)) {
- mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
- mode_config->cursor_height = 1023;
- } else if (IS_I830(i915) || IS_I85X(i915) ||
- IS_I915G(i915) || IS_I915GM(i915)) {
- mode_config->cursor_width = 64;
- mode_config->cursor_height = 64;
- } else {
- mode_config->cursor_width = 256;
- mode_config->cursor_height = 256;
- }
-}
-
-static void intel_mode_config_cleanup(struct drm_i915_private *i915)
-{
- intel_atomic_global_obj_cleanup(i915);
- drm_mode_config_cleanup(&i915->drm);
-}
-
-/* part #1: call before irq install */
-int intel_modeset_init_noirq(struct drm_i915_private *i915)
-{
- int ret;
-
- if (i915_inject_probe_failure(i915))
- return -ENODEV;
-
- if (HAS_DISPLAY(i915)) {
- ret = drm_vblank_init(&i915->drm,
- INTEL_NUM_PIPES(i915));
- if (ret)
- return ret;
- }
-
- intel_bios_init(i915);
-
- ret = intel_vga_register(i915);
- if (ret)
- goto cleanup_bios;
-
- /* FIXME: completely on the wrong abstraction layer */
- ret = intel_power_domains_init(i915);
- if (ret < 0)
- goto cleanup_vga;
-
- intel_power_domains_init_hw(i915, false);
-
- if (!HAS_DISPLAY(i915))
- return 0;
-
- intel_dmc_init(i915);
-
- i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
- i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
- WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
-
- intel_mode_config_init(i915);
-
- ret = intel_cdclk_init(i915);
- if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
-
- ret = intel_color_init(i915);
- if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
-
- ret = intel_dbuf_init(i915);
- if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
-
- ret = intel_bw_init(i915);
- if (ret)
- goto cleanup_vga_client_pw_domain_dmc;
-
- init_llist_head(&i915->display.atomic_helper.free_list);
- INIT_WORK(&i915->display.atomic_helper.free_work,
- intel_atomic_helper_free_state_worker);
-
- intel_init_quirks(i915);
-
- intel_fbc_init(i915);
-
- return 0;
-
-cleanup_vga_client_pw_domain_dmc:
- intel_dmc_fini(i915);
- intel_power_domains_driver_remove(i915);
-cleanup_vga:
- intel_vga_unregister(i915);
-cleanup_bios:
- intel_bios_driver_remove(i915);
-
- return ret;
-}
-
-/* part #2: call after irq install, but before gem init */
-int intel_modeset_init_nogem(struct drm_i915_private *i915)
-{
- struct drm_device *dev = &i915->drm;
- enum pipe pipe;
- struct intel_crtc *crtc;
- int ret;
-
- if (!HAS_DISPLAY(i915))
- return 0;
-
- intel_wm_init(i915);
-
- intel_panel_sanitize_ssc(i915);
-
- intel_pps_setup(i915);
-
- intel_gmbus_setup(i915);
-
- drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
- INTEL_NUM_PIPES(i915),
- INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
-
- for_each_pipe(i915, pipe) {
- ret = intel_crtc_init(i915, pipe);
- if (ret) {
- intel_mode_config_cleanup(i915);
- return ret;
- }
- }
-
- intel_plane_possible_crtcs_init(i915);
- intel_shared_dpll_init(i915);
- intel_fdi_pll_freq_update(i915);
-
- intel_update_czclk(i915);
- intel_modeset_init_hw(i915);
- intel_dpll_update_ref_clks(i915);
-
- intel_hdcp_component_init(i915);
-
- if (i915->display.cdclk.max_cdclk_freq == 0)
- intel_update_max_cdclk(i915);
-
- intel_hti_init(i915);
-
- /* Just disable it once at startup */
- intel_vga_disable(i915);
- intel_setup_outputs(i915);
-
- drm_modeset_lock_all(dev);
- intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
- intel_acpi_assign_connector_fwnodes(i915);
- drm_modeset_unlock_all(dev);
-
- for_each_intel_crtc(dev, crtc) {
- if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
- continue;
- intel_crtc_initial_plane_config(crtc);
- }
-
- /*
- * Make sure hardware watermarks really match the state we read out.
- * Note that we need to do this after reconstructing the BIOS fb's
- * since the watermark calculation done here will use pstate->fb.
- */
- if (!HAS_GMCH(i915))
- ilk_wm_sanitize(i915);
-
- return 0;
-}
-
-/* part #3: call after gem init */
-int intel_modeset_init(struct drm_i915_private *i915)
-{
- int ret;
-
- if (!HAS_DISPLAY(i915))
- return 0;
-
- /*
- * Force all active planes to recompute their states. So that on
- * mode_setcrtc after probe, all the intel_plane_state variables
- * are already calculated and there is no assert_plane warnings
- * during bootup.
- */
- ret = intel_initial_commit(&i915->drm);
- if (ret)
- drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
-
- intel_overlay_setup(i915);
-
- ret = intel_fbdev_init(&i915->drm);
- if (ret)
- return ret;
-
- /* Only enable hotplug handling once the fbdev is fully set up. */
- intel_hpd_init(i915);
- intel_hpd_poll_disable(i915);
-
- skl_watermark_ipc_init(i915);
-
- return 0;
-}
-
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
@@ -8636,45 +7972,7 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_de_posting_read(dev_priv, DPLL(pipe));
}
-void intel_display_resume(struct drm_device *dev)
-{
- struct drm_i915_private *i915 = to_i915(dev);
- struct drm_atomic_state *state = i915->display.restore.modeset_state;
- struct drm_modeset_acquire_ctx ctx;
- int ret;
-
- if (!HAS_DISPLAY(i915))
- return;
-
- i915->display.restore.modeset_state = NULL;
- if (state)
- state->acquire_ctx = &ctx;
-
- drm_modeset_acquire_init(&ctx, 0);
-
- while (1) {
- ret = drm_modeset_lock_all_ctx(dev, &ctx);
- if (ret != -EDEADLK)
- break;
-
- drm_modeset_backoff(&ctx);
- }
-
- if (!ret)
- ret = __intel_display_resume(i915, state, &ctx);
-
- skl_watermark_ipc_update(i915);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- if (ret)
- drm_err(&i915->drm,
- "Restoring old state failed with %i\n", ret);
- if (state)
- drm_atomic_state_put(state);
-}
-
-static void intel_hpd_poll_fini(struct drm_i915_private *i915)
+void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
@@ -8692,144 +7990,6 @@ static void intel_hpd_poll_fini(struct drm_i915_private *i915)
drm_connector_list_iter_end(&conn_iter);
}
-/* part #1: call before irq uninstall */
-void intel_modeset_driver_remove(struct drm_i915_private *i915)
-{
- if (!HAS_DISPLAY(i915))
- return;
-
- flush_workqueue(i915->display.wq.flip);
- flush_workqueue(i915->display.wq.modeset);
-
- flush_work(&i915->display.atomic_helper.free_work);
- drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list));
-
- /*
- * MST topology needs to be suspended so we don't have any calls to
- * fbdev after it's finalized. MST will be destroyed later as part of
- * drm_mode_config_cleanup()
- */
- intel_dp_mst_suspend(i915);
-}
-
-/* part #2: call after irq uninstall */
-void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
-{
- if (!HAS_DISPLAY(i915))
- return;
-
- /*
- * Due to the hpd irq storm handling the hotplug work can re-arm the
- * poll handlers. Hence disable polling after hpd handling is shut down.
- */
- intel_hpd_poll_fini(i915);
-
- /* poll work can call into fbdev, hence clean that up afterwards */
- intel_fbdev_fini(i915);
-
- intel_unregister_dsm_handler();
-
- /* flush any delayed tasks or pending work */
- flush_scheduled_work();
-
- intel_hdcp_component_fini(i915);
-
- intel_mode_config_cleanup(i915);
-
- intel_overlay_cleanup(i915);
-
- intel_gmbus_teardown(i915);
-
- destroy_workqueue(i915->display.wq.flip);
- destroy_workqueue(i915->display.wq.modeset);
-
- intel_fbc_cleanup(i915);
-}
-
-/* part #3: call after gem init */
-void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
-{
- intel_dmc_fini(i915);
-
- intel_power_domains_driver_remove(i915);
-
- intel_vga_unregister(i915);
-
- intel_bios_driver_remove(i915);
-}
-
-bool intel_modeset_probe_defer(struct pci_dev *pdev)
-{
- struct drm_privacy_screen *privacy_screen;
-
- /*
- * apple-gmux is needed on dual GPU MacBook Pro
- * to probe the panel if we're the inactive GPU.
- */
- if (vga_switcheroo_client_probe_defer(pdev))
- return true;
-
- /* If the LCD panel has a privacy-screen, wait for it */
- privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
- if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
- return true;
-
- drm_privacy_screen_put(privacy_screen);
-
- return false;
-}
-
-void intel_display_driver_register(struct drm_i915_private *i915)
-{
- if (!HAS_DISPLAY(i915))
- return;
-
- /* Must be done after probing outputs */
- intel_opregion_register(i915);
- intel_acpi_video_register(i915);
-
- intel_audio_init(i915);
-
- intel_display_debugfs_register(i915);
-
- /*
- * Some ports require correctly set-up hpd registers for
- * detection to work properly (leading to ghost connected
- * connector status), e.g. VGA on gm45. Hence we can only set
- * up the initial fbdev config after hpd irqs are fully
- * enabled. We do it last so that the async config cannot run
- * before the connectors are registered.
- */
- intel_fbdev_initial_config_async(i915);
-
- /*
- * We need to coordinate the hotplugs with the asynchronous
- * fbdev configuration, for which we use the
- * fbdev->async_cookie.
- */
- drm_kms_helper_poll_init(&i915->drm);
-}
-
-void intel_display_driver_unregister(struct drm_i915_private *i915)
-{
- if (!HAS_DISPLAY(i915))
- return;
-
- intel_fbdev_unregister(i915);
- intel_audio_deinit(i915);
-
- /*
- * After flushing the fbdev (incl. a late async config which
- * will have delayed queuing of a hotplug event), then flush
- * the hotplug events.
- */
- drm_kms_helper_poll_fini(&i915->drm);
- drm_atomic_helper_shutdown(&i915->drm);
-
- acpi_video_unregister();
- intel_opregion_unregister(i915);
-}
-
bool intel_scanout_needs_vtd_wa(struct drm_i915_private *i915)
{
return DISPLAY_VER(i915) >= 6 && i915_vtd_active(i915);
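The is_enabling()/is_disabling() helpers added in the intel_display.c hunks above treat a full modeset as both a teardown of the old state and a bring-up of the new one, which is what lets vrr_enabling()/vrr_disabling() fire across modesets. A self-contained sketch of the same logic, with the crtc state reduced to two booleans (illustrative types, not the driver's):

#include <assert.h>
#include <stdbool.h>

struct state { bool feature; bool needs_modeset; };

static bool is_enabling(const struct state *old_state,
			const struct state *new_state)
{
	/* Enabling: feature off before (or full modeset), and on after. */
	return (!old_state->feature || new_state->needs_modeset) &&
		new_state->feature;
}

static bool is_disabling(const struct state *old_state,
			 const struct state *new_state)
{
	/* Disabling: feature on before, and off after (or full modeset). */
	return old_state->feature &&
		(!new_state->feature || new_state->needs_modeset);
}

int main(void)
{
	struct state on = { true, false }, off = { false, false };
	struct state on_ms = { true, true };

	assert(is_enabling(&off, &on) && !is_disabling(&off, &on));
	assert(is_disabling(&on, &off) && !is_enabling(&on, &off));
	/* Across a modeset the feature is torn down and brought back up. */
	assert(is_enabling(&on, &on_ms) && is_disabling(&on, &on_ms));
	return 0;
}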
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 287159bdeb0d..ac95961f68ba 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -56,13 +56,13 @@ struct intel_dp;
struct intel_encoder;
struct intel_initial_plane_config;
struct intel_link_m_n;
-struct intel_load_detect_pipe;
struct intel_plane;
struct intel_plane_state;
struct intel_power_domain_mask;
struct intel_remapped_info;
struct intel_rotation_info;
struct pci_dev;
+struct work_struct;
#define pipe_name(p) ((p) + 'A')
@@ -437,7 +437,6 @@ void intel_add_fb_offsets(int *x, int *y,
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
-int intel_display_suspend(struct drm_device *dev);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
@@ -455,20 +454,12 @@ int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dig_port,
unsigned int expected_mask);
-int intel_get_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx);
-void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx);
struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
bool intel_fuzzy_clock_check(int clock1, int clock2);
-void intel_display_prepare_reset(struct drm_i915_private *dev_priv);
-void intel_display_finish_reset(struct drm_i915_private *dev_priv);
void intel_zero_m_n(struct intel_link_m_n *m_n);
void intel_set_m_n(struct drm_i915_private *i915,
const struct intel_link_m_n *m_n,
@@ -518,21 +509,9 @@ void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
bool visible);
void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state);
-void intel_display_driver_register(struct drm_i915_private *i915);
-void intel_display_driver_unregister(struct drm_i915_private *i915);
-
void intel_update_watermarks(struct drm_i915_private *i915);
/* modesetting */
-bool intel_modeset_probe_defer(struct pci_dev *pdev);
-void intel_modeset_init_hw(struct drm_i915_private *i915);
-int intel_modeset_init_noirq(struct drm_i915_private *i915);
-int intel_modeset_init_nogem(struct drm_i915_private *i915);
-int intel_modeset_init(struct drm_i915_private *i915);
-void intel_modeset_driver_remove(struct drm_i915_private *i915);
-void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915);
-void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915);
-void intel_display_resume(struct drm_device *dev);
int intel_modeset_all_pipes(struct intel_atomic_state *state,
const char *reason);
void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
@@ -540,6 +519,19 @@ void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc,
struct intel_power_domain_mask *domains);
+/* interface for intel_display_driver.c */
+void intel_setup_outputs(struct drm_i915_private *i915);
+int intel_initial_commit(struct drm_device *dev);
+void intel_panel_sanitize_ssc(struct drm_i915_private *i915);
+void intel_update_czclk(struct drm_i915_private *i915);
+void intel_atomic_helper_free_state_worker(struct work_struct *work);
+enum drm_mode_status intel_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode);
+int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
+ bool nonblock);
+
+void intel_hpd_poll_fini(struct drm_i915_private *i915);
+
/* modesetting asserts */
void assert_transcoder(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, bool state);
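Relatedly, the ilk_pfit_enable()/ilk_get_pfit_config() hunks earlier in this diff replace the open-coded "x << 16 | y" packing with PF_WIN_* helpers and REG_FIELD_GET(). A standalone sketch of that pack/unpack pattern, assuming the usual X-in-bits-31:16, Y-in-bits-15:0 layout (the real masks are in i915_reg.h):

#include <assert.h>
#include <stdint.h>

/* Assumed layout: X in bits 31:16, Y in bits 15:0 (see i915_reg.h). */
#define PF_WIN_XPOS(x)		((uint32_t)(x) << 16)
#define PF_WIN_YPOS(y)		((uint32_t)(y) & 0xffff)
#define PF_WIN_XPOS_GET(pos)	((pos) >> 16)
#define PF_WIN_YPOS_GET(pos)	((pos) & 0xffff)

int main(void)
{
	uint32_t pos = PF_WIN_XPOS(123) | PF_WIN_YPOS(45);

	/* Stands in for REG_FIELD_GET(PF_WIN_XPOS_MASK, pos) etc. */
	assert(PF_WIN_XPOS_GET(pos) == 123);
	assert(PF_WIN_YPOS_GET(pos) == 45);
	return 0;
}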
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 45113ae107ba..e72288662f02 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -12,6 +12,7 @@
#include "i915_debugfs.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_crtc_state_dump.h"
#include "intel_display_debugfs.h"
@@ -30,7 +31,6 @@
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
-#include "intel_sprite.h"
#include "intel_wm.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -1094,6 +1094,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
ARRAY_SIZE(intel_display_debugfs_list),
minor->debugfs_root, minor);
+ intel_cdclk_debugfs_register(i915);
intel_dmc_debugfs_register(i915);
intel_fbc_debugfs_register(i915);
intel_hpd_debugfs_register(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
new file mode 100644
index 000000000000..60ce10fc7205
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -0,0 +1,583 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022-2023 Intel Corporation
+ *
+ * High level display driver entry points. This is a layer between top level
+ * driver code and low level display functionality; no low level display code or
+ * details here.
+ */
+
+#include <linux/vga_switcheroo.h>
+#include <acpi/video.h>
+#include <drm/display/drm_dp_mst_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_mode_config.h>
+#include <drm/drm_privacy_screen_consumer.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "i915_drv.h"
+#include "i9xx_wm.h"
+#include "intel_acpi.h"
+#include "intel_atomic.h"
+#include "intel_audio.h"
+#include "intel_bios.h"
+#include "intel_bw.h"
+#include "intel_cdclk.h"
+#include "intel_color.h"
+#include "intel_crtc.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_driver.h"
+#include "intel_display_power.h"
+#include "intel_display_types.h"
+#include "intel_dkl_phy.h"
+#include "intel_dmc.h"
+#include "intel_dp.h"
+#include "intel_dpll.h"
+#include "intel_dpll_mgr.h"
+#include "intel_fb.h"
+#include "intel_fbc.h"
+#include "intel_fbdev.h"
+#include "intel_fdi.h"
+#include "intel_gmbus.h"
+#include "intel_hdcp.h"
+#include "intel_hotplug.h"
+#include "intel_hti.h"
+#include "intel_modeset_setup.h"
+#include "intel_opregion.h"
+#include "intel_overlay.h"
+#include "intel_plane_initial.h"
+#include "intel_pps.h"
+#include "intel_quirks.h"
+#include "intel_vga.h"
+#include "intel_wm.h"
+#include "skl_watermark.h"
+
+bool intel_display_driver_probe_defer(struct pci_dev *pdev)
+{
+ struct drm_privacy_screen *privacy_screen;
+
+ /*
+ * apple-gmux is needed on dual GPU MacBook Pro
+ * to probe the panel if we're the inactive GPU.
+ */
+ if (vga_switcheroo_client_probe_defer(pdev))
+ return true;
+
+ /* If the LCD panel has a privacy-screen, wait for it */
+ privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
+ if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
+ return true;
+
+ drm_privacy_screen_put(privacy_screen);
+
+ return false;
+}
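
A hypothetical caller sketch (not part of this patch; names assumed): the PCI
probe path is expected to turn a true return into -EPROBE_DEFER, so the driver
core retries once the apple-gmux / privacy-screen providers have bound:

	static int i915_pci_probe_sketch(struct pci_dev *pdev,
					 const struct pci_device_id *ent)
	{
		if (intel_display_driver_probe_defer(pdev))
			return -EPROBE_DEFER;	/* core re-probes later */

		/* ... continue with normal device bring-up ... */
		return 0;
	}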
+
+void intel_display_driver_init_hw(struct drm_i915_private *i915)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ cdclk_state = to_intel_cdclk_state(i915->display.cdclk.obj.state);
+
+ intel_update_cdclk(i915);
+ intel_cdclk_dump_config(i915, &i915->display.cdclk.hw, "Current CDCLK");
+ cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw;
+}
+
+static const struct drm_mode_config_funcs intel_mode_funcs = {
+ .fb_create = intel_user_framebuffer_create,
+ .get_format_info = intel_fb_get_format_info,
+ .output_poll_changed = intel_fbdev_output_poll_changed,
+ .mode_valid = intel_mode_valid,
+ .atomic_check = intel_atomic_check,
+ .atomic_commit = intel_atomic_commit,
+ .atomic_state_alloc = intel_atomic_state_alloc,
+ .atomic_state_clear = intel_atomic_state_clear,
+ .atomic_state_free = intel_atomic_state_free,
+};
+
+static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = {
+ .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+};
+
+static void intel_mode_config_init(struct drm_i915_private *i915)
+{
+ struct drm_mode_config *mode_config = &i915->drm.mode_config;
+
+ drm_mode_config_init(&i915->drm);
+ INIT_LIST_HEAD(&i915->display.global.obj_list);
+
+ mode_config->min_width = 0;
+ mode_config->min_height = 0;
+
+ mode_config->preferred_depth = 24;
+ mode_config->prefer_shadow = 1;
+
+ mode_config->funcs = &intel_mode_funcs;
+ mode_config->helper_private = &intel_mode_config_funcs;
+
+ mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
+
+ /*
+ * Maximum framebuffer dimensions, chosen to match
+ * the maximum render engine surface size on gen4+.
+ */
+ if (DISPLAY_VER(i915) >= 7) {
+ mode_config->max_width = 16384;
+ mode_config->max_height = 16384;
+ } else if (DISPLAY_VER(i915) >= 4) {
+ mode_config->max_width = 8192;
+ mode_config->max_height = 8192;
+ } else if (DISPLAY_VER(i915) == 3) {
+ mode_config->max_width = 4096;
+ mode_config->max_height = 4096;
+ } else {
+ mode_config->max_width = 2048;
+ mode_config->max_height = 2048;
+ }
+
+ if (IS_I845G(i915) || IS_I865G(i915)) {
+ mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
+ mode_config->cursor_height = 1023;
+ } else if (IS_I830(i915) || IS_I85X(i915) ||
+ IS_I915G(i915) || IS_I915GM(i915)) {
+ mode_config->cursor_width = 64;
+ mode_config->cursor_height = 64;
+ } else {
+ mode_config->cursor_width = 256;
+ mode_config->cursor_height = 256;
+ }
+}
+
+static void intel_mode_config_cleanup(struct drm_i915_private *i915)
+{
+ intel_atomic_global_obj_cleanup(i915);
+ drm_mode_config_cleanup(&i915->drm);
+}
+
+static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_plane *plane;
+
+ for_each_intel_plane(&dev_priv->drm, plane) {
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
+ plane->pipe);
+
+ plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
+ }
+}
+
+void intel_display_driver_early_probe(struct drm_i915_private *i915)
+{
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ intel_dkl_phy_init(i915);
+ intel_color_init_hooks(i915);
+ intel_init_cdclk_hooks(i915);
+ intel_audio_hooks_init(i915);
+ intel_dpll_init_clock_hook(i915);
+ intel_init_display_hooks(i915);
+ intel_fdi_init_hook(i915);
+}
+
+/* part #1: call before irq install */
+int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
+{
+ int ret;
+
+ if (i915_inject_probe_failure(i915))
+ return -ENODEV;
+
+ if (HAS_DISPLAY(i915)) {
+ ret = drm_vblank_init(&i915->drm,
+ INTEL_NUM_PIPES(i915));
+ if (ret)
+ return ret;
+ }
+
+ intel_bios_init(i915);
+
+ ret = intel_vga_register(i915);
+ if (ret)
+ goto cleanup_bios;
+
+ /* FIXME: completely on the wrong abstraction layer */
+ ret = intel_power_domains_init(i915);
+ if (ret < 0)
+ goto cleanup_vga;
+
+ intel_power_domains_init_hw(i915, false);
+
+ if (!HAS_DISPLAY(i915))
+ return 0;
+
+ intel_dmc_init(i915);
+
+ i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
+ i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI |
+ WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+
+ intel_mode_config_init(i915);
+
+ ret = intel_cdclk_init(i915);
+ if (ret)
+ goto cleanup_vga_client_pw_domain_dmc;
+
+ ret = intel_color_init(i915);
+ if (ret)
+ goto cleanup_vga_client_pw_domain_dmc;
+
+ ret = intel_dbuf_init(i915);
+ if (ret)
+ goto cleanup_vga_client_pw_domain_dmc;
+
+ ret = intel_bw_init(i915);
+ if (ret)
+ goto cleanup_vga_client_pw_domain_dmc;
+
+ init_llist_head(&i915->display.atomic_helper.free_list);
+ INIT_WORK(&i915->display.atomic_helper.free_work,
+ intel_atomic_helper_free_state_worker);
+
+ intel_init_quirks(i915);
+
+ intel_fbc_init(i915);
+
+ return 0;
+
+cleanup_vga_client_pw_domain_dmc:
+ intel_dmc_fini(i915);
+ intel_power_domains_driver_remove(i915);
+cleanup_vga:
+ intel_vga_unregister(i915);
+cleanup_bios:
+ intel_bios_driver_remove(i915);
+
+ return ret;
+}
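
The error path above is the kernel's reverse-order goto unwind idiom: each
label undoes everything that succeeded before the failing step. A minimal
standalone sketch of the pattern (step_a()/step_b()/undo_step_a() are
hypothetical helpers):

	static int probe_sketch(void)
	{
		int ret;

		ret = step_a();
		if (ret)
			return ret;	/* nothing to undo yet */

		ret = step_b();
		if (ret)
			goto undo_a;	/* unwind in reverse order */

		return 0;

	undo_a:
		undo_step_a();
		return ret;
	}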
+
+/* part #2: call after irq install, but before gem init */
+int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
+{
+ struct drm_device *dev = &i915->drm;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ int ret;
+
+ if (!HAS_DISPLAY(i915))
+ return 0;
+
+ intel_wm_init(i915);
+
+ intel_panel_sanitize_ssc(i915);
+
+ intel_pps_setup(i915);
+
+ intel_gmbus_setup(i915);
+
+ drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
+ INTEL_NUM_PIPES(i915),
+ INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
+
+ for_each_pipe(i915, pipe) {
+ ret = intel_crtc_init(i915, pipe);
+ if (ret) {
+ intel_mode_config_cleanup(i915);
+ return ret;
+ }
+ }
+
+ intel_plane_possible_crtcs_init(i915);
+ intel_shared_dpll_init(i915);
+ intel_fdi_pll_freq_update(i915);
+
+ intel_update_czclk(i915);
+ intel_display_driver_init_hw(i915);
+ intel_dpll_update_ref_clks(i915);
+
+ intel_hdcp_component_init(i915);
+
+ if (i915->display.cdclk.max_cdclk_freq == 0)
+ intel_update_max_cdclk(i915);
+
+ intel_hti_init(i915);
+
+ /* Just disable it once at startup */
+ intel_vga_disable(i915);
+ intel_setup_outputs(i915);
+
+ drm_modeset_lock_all(dev);
+ intel_modeset_setup_hw_state(i915, dev->mode_config.acquire_ctx);
+ intel_acpi_assign_connector_fwnodes(i915);
+ drm_modeset_unlock_all(dev);
+
+ for_each_intel_crtc(dev, crtc) {
+ if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
+ continue;
+ intel_crtc_initial_plane_config(crtc);
+ }
+
+ /*
+ * Make sure hardware watermarks really match the state we read out.
+ * Note that we need to do this after reconstructing the BIOS fb's
+ * since the watermark calculation done here will use pstate->fb.
+ */
+ if (!HAS_GMCH(i915))
+ ilk_wm_sanitize(i915);
+
+ return 0;
+}
+
+/* part #3: call after gem init */
+int intel_display_driver_probe(struct drm_i915_private *i915)
+{
+ int ret;
+
+ if (!HAS_DISPLAY(i915))
+ return 0;
+
+ /*
+ * Force all active planes to recompute their states. So that on
+ * mode_setcrtc after probe, all the intel_plane_state variables
+ * are already calculated and there is no assert_plane warnings
+ * during bootup.
+ */
+ ret = intel_initial_commit(&i915->drm);
+ if (ret)
+ drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
+
+ intel_overlay_setup(i915);
+
+ ret = intel_fbdev_init(&i915->drm);
+ if (ret)
+ return ret;
+
+ /* Only enable hotplug handling once the fbdev is fully set up. */
+ intel_hpd_init(i915);
+ intel_hpd_poll_disable(i915);
+
+ skl_watermark_ipc_init(i915);
+
+ return 0;
+}
+
+void intel_display_driver_register(struct drm_i915_private *i915)
+{
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ /* Must be done after probing outputs */
+ intel_opregion_register(i915);
+ intel_acpi_video_register(i915);
+
+ intel_audio_init(i915);
+
+ intel_display_debugfs_register(i915);
+
+ /*
+ * Some ports require correctly set-up hpd registers for
+ * detection to work properly (otherwise they report a ghost
+ * connected status), e.g. VGA on gm45. Hence we can only set
+ * up the initial fbdev config after hpd irqs are fully
+ * enabled. We do it last so that the async config cannot run
+ * before the connectors are registered.
+ */
+ intel_fbdev_initial_config_async(i915);
+
+ /*
+ * We need to coordinate the hotplugs with the asynchronous
+ * fbdev configuration, for which we use the
+ * fbdev->async_cookie.
+ */
+ drm_kms_helper_poll_init(&i915->drm);
+}
+
+/* part #1: call before irq uninstall */
+void intel_display_driver_remove(struct drm_i915_private *i915)
+{
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ flush_workqueue(i915->display.wq.flip);
+ flush_workqueue(i915->display.wq.modeset);
+
+ flush_work(&i915->display.atomic_helper.free_work);
+ drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list));
+
+ /*
+ * MST topology needs to be suspended so we don't have any calls to
+ * fbdev after it's finalized. MST will be destroyed later as part of
+ * drm_mode_config_cleanup()
+ */
+ intel_dp_mst_suspend(i915);
+}
+
+/* part #2: call after irq uninstall */
+void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
+{
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ /*
+ * Due to the hpd irq storm handling the hotplug work can re-arm the
+ * poll handlers. Hence disable polling after hpd handling is shut down.
+ */
+ intel_hpd_poll_fini(i915);
+
+ /* poll work can call into fbdev, hence clean that up afterwards */
+ intel_fbdev_fini(i915);
+
+ intel_unregister_dsm_handler();
+
+ /* flush any delayed tasks or pending work */
+ flush_scheduled_work();
+
+ intel_hdcp_component_fini(i915);
+
+ intel_mode_config_cleanup(i915);
+
+ intel_overlay_cleanup(i915);
+
+ intel_gmbus_teardown(i915);
+
+ destroy_workqueue(i915->display.wq.flip);
+ destroy_workqueue(i915->display.wq.modeset);
+
+ intel_fbc_cleanup(i915);
+}
+
+/* part #3: call after gem init */
+void intel_display_driver_remove_nogem(struct drm_i915_private *i915)
+{
+ intel_dmc_fini(i915);
+
+ intel_power_domains_driver_remove(i915);
+
+ intel_vga_unregister(i915);
+
+ intel_bios_driver_remove(i915);
+}
+
+void intel_display_driver_unregister(struct drm_i915_private *i915)
+{
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ intel_fbdev_unregister(i915);
+ intel_audio_deinit(i915);
+
+ /*
+ * After flushing the fbdev (incl. a late async config which
+ * will have delayed queuing of a hotplug event), then flush
+ * the hotplug events.
+ */
+ drm_kms_helper_poll_fini(&i915->drm);
+ drm_atomic_helper_shutdown(&i915->drm);
+
+ acpi_video_unregister();
+ intel_opregion_unregister(i915);
+}
+
+/*
+ * Turn all CRTCs off, but do not adjust state.
+ * This has to be paired with a call to intel_modeset_setup_hw_state.
+ */
+int intel_display_driver_suspend(struct drm_i915_private *i915)
+{
+ struct drm_atomic_state *state;
+ int ret;
+
+ if (!HAS_DISPLAY(i915))
+ return 0;
+
+ state = drm_atomic_helper_suspend(&i915->drm);
+ ret = PTR_ERR_OR_ZERO(state);
+ if (ret)
+ drm_err(&i915->drm, "Suspending crtc's failed with %i\n",
+ ret);
+ else
+ i915->display.restore.modeset_state = state;
+ return ret;
+}
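
drm_atomic_helper_suspend() returns either a valid state pointer or an
ERR_PTR(), and PTR_ERR_OR_ZERO() folds that into 0-or-errno. The assignment
above is equivalent to the open-coded form:

	ret = IS_ERR(state) ? PTR_ERR(state) : 0;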
+
+int
+__intel_display_driver_resume(struct drm_i915_private *i915,
+ struct drm_atomic_state *state,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int ret, i;
+
+ intel_modeset_setup_hw_state(i915, ctx);
+ intel_vga_redisable(i915);
+
+ if (!state)
+ return 0;
+
+ /*
+ * We've duplicated the state, pointers to the old state are invalid.
+ *
+ * Don't attempt to use the old state until we commit the duplicated state.
+ */
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ /*
+ * Force recalculation even if we restore
+ * current state. With fast modeset this may not result
+ * in a modeset when the state is compatible.
+ */
+ crtc_state->mode_changed = true;
+ }
+
+ /* ignore any reset values/BIOS leftovers in the WM registers */
+ if (!HAS_GMCH(i915))
+ to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
+ ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
+
+ drm_WARN_ON(&i915->drm, ret == -EDEADLK);
+
+ return ret;
+}
+
+void intel_display_driver_resume(struct drm_i915_private *i915)
+{
+ struct drm_atomic_state *state = i915->display.restore.modeset_state;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ i915->display.restore.modeset_state = NULL;
+ if (state)
+ state->acquire_ctx = &ctx;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ while (1) {
+ ret = drm_modeset_lock_all_ctx(&i915->drm, &ctx);
+ if (ret != -EDEADLK)
+ break;
+
+ drm_modeset_backoff(&ctx);
+ }
+
+ if (!ret)
+ ret = __intel_display_driver_resume(i915, state, &ctx);
+
+ skl_watermark_ipc_update(i915);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ if (ret)
+ drm_err(&i915->drm,
+ "Restoring old state failed with %i\n", ret);
+ if (state)
+ drm_atomic_state_put(state);
+}
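
intel_display_driver_resume() (and the display reset code later in this
series) uses the standard drm acquire-context retry idiom: on -EDEADLK all
locks are dropped, drm_modeset_backoff() waits for the contending context,
and the lock attempt is retried. Distilled into a sketch (the drm_* names
are the real core API; the body is illustrative):

	static void locked_work_sketch(struct drm_device *drm)
	{
		struct drm_modeset_acquire_ctx ctx;
		int ret;

		drm_modeset_acquire_init(&ctx, 0);
	retry:
		ret = drm_modeset_lock_all_ctx(drm, &ctx);
		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);	/* drop, wait, retry */
			goto retry;
		}

		/* ... do work under the modeset locks ... */

		drm_modeset_drop_locks(&ctx);
		drm_modeset_acquire_fini(&ctx);
	}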
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.h b/drivers/gpu/drm/i915/display/intel_display_driver.h
new file mode 100644
index 000000000000..c276a58ee329
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022-2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_DRIVER_H__
+#define __INTEL_DISPLAY_DRIVER_H__
+
+#include <linux/types.h>
+
+struct drm_atomic_state;
+struct drm_i915_private;
+struct drm_modeset_acquire_ctx;
+struct pci_dev;
+
+bool intel_display_driver_probe_defer(struct pci_dev *pdev);
+void intel_display_driver_init_hw(struct drm_i915_private *i915);
+void intel_display_driver_early_probe(struct drm_i915_private *i915);
+int intel_display_driver_probe_noirq(struct drm_i915_private *i915);
+int intel_display_driver_probe_nogem(struct drm_i915_private *i915);
+int intel_display_driver_probe(struct drm_i915_private *i915);
+void intel_display_driver_register(struct drm_i915_private *i915);
+void intel_display_driver_remove(struct drm_i915_private *i915);
+void intel_display_driver_remove_noirq(struct drm_i915_private *i915);
+void intel_display_driver_remove_nogem(struct drm_i915_private *i915);
+void intel_display_driver_unregister(struct drm_i915_private *i915);
+int intel_display_driver_suspend(struct drm_i915_private *i915);
+void intel_display_driver_resume(struct drm_i915_private *i915);
+
+/* interface for intel_display_reset.c */
+int __intel_display_driver_resume(struct drm_i915_private *i915,
+ struct drm_atomic_state *state,
+ struct drm_modeset_acquire_ctx *ctx);
+
+#endif /* __INTEL_DISPLAY_DRIVER_H__ */
+
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 7c9f4288329e..5150069f3f82 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
+#include "i915_reg.h"
#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index 6645eb1911d8..1118ee9d224c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -1251,22 +1251,11 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a,
POWER_DOMAIN_PIPE_PANEL_FITTER_A,
POWER_DOMAIN_INIT);
-#define XELPD_PW_2_POWER_DOMAINS \
- XELPD_PW_B_POWER_DOMAINS, \
- XELPD_PW_C_POWER_DOMAINS, \
- XELPD_PW_D_POWER_DOMAINS, \
- POWER_DOMAIN_PORT_DDI_LANES_C, \
- POWER_DOMAIN_PORT_DDI_LANES_D, \
- POWER_DOMAIN_PORT_DDI_LANES_E, \
+#define XELPD_DC_OFF_PORT_POWER_DOMAINS \
POWER_DOMAIN_PORT_DDI_LANES_TC1, \
POWER_DOMAIN_PORT_DDI_LANES_TC2, \
POWER_DOMAIN_PORT_DDI_LANES_TC3, \
POWER_DOMAIN_PORT_DDI_LANES_TC4, \
- POWER_DOMAIN_VGA, \
- POWER_DOMAIN_AUDIO_PLAYBACK, \
- POWER_DOMAIN_AUX_IO_C, \
- POWER_DOMAIN_AUX_IO_D, \
- POWER_DOMAIN_AUX_IO_E, \
POWER_DOMAIN_AUX_C, \
POWER_DOMAIN_AUX_D, \
POWER_DOMAIN_AUX_E, \
@@ -1279,6 +1268,20 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_a,
POWER_DOMAIN_AUX_TBT3, \
POWER_DOMAIN_AUX_TBT4
+#define XELPD_PW_2_POWER_DOMAINS \
+ XELPD_PW_B_POWER_DOMAINS, \
+ XELPD_PW_C_POWER_DOMAINS, \
+ XELPD_PW_D_POWER_DOMAINS, \
+ POWER_DOMAIN_PORT_DDI_LANES_C, \
+ POWER_DOMAIN_PORT_DDI_LANES_D, \
+ POWER_DOMAIN_PORT_DDI_LANES_E, \
+ POWER_DOMAIN_VGA, \
+ POWER_DOMAIN_AUDIO_PLAYBACK, \
+ POWER_DOMAIN_AUX_IO_C, \
+ POWER_DOMAIN_AUX_IO_D, \
+ POWER_DOMAIN_AUX_IO_E, \
+ XELPD_DC_OFF_PORT_POWER_DOMAINS
+
I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_2,
XELPD_PW_2_POWER_DOMAINS,
POWER_DOMAIN_INIT);
@@ -1301,7 +1304,9 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_pw_2,
*/
I915_DECL_PW_DOMAINS(xelpd_pwdoms_dc_off,
- XELPD_PW_2_POWER_DOMAINS,
+ XELPD_DC_OFF_PORT_POWER_DOMAINS,
+ XELPD_PW_C_POWER_DOMAINS,
+ XELPD_PW_D_POWER_DOMAINS,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_AUDIO_MMIO,
POWER_DOMAIN_AUX_A,
@@ -1310,14 +1315,18 @@ I915_DECL_PW_DOMAINS(xelpd_pwdoms_dc_off,
POWER_DOMAIN_DC_OFF,
POWER_DOMAIN_INIT);
-static const struct i915_power_well_desc xelpd_power_wells_main[] = {
+static const struct i915_power_well_desc xelpd_power_wells_dc_off[] = {
{
.instances = &I915_PW_INSTANCES(
I915_PW("DC_off", &xelpd_pwdoms_dc_off,
.id = SKL_DISP_DC_OFF),
),
.ops = &gen9_dc_off_power_well_ops,
- }, {
+ }
+};
+
+static const struct i915_power_well_desc xelpd_power_wells_main[] = {
+ {
.instances = &I915_PW_INSTANCES(
I915_PW("PW_2", &xelpd_pwdoms_pw_2,
.hsw.idx = ICL_PW_CTL_IDX_PW_2,
@@ -1378,6 +1387,11 @@ static const struct i915_power_well_desc xelpd_power_wells_main[] = {
I915_PW("AUX_C", &icl_pwdoms_aux_c, .hsw.idx = ICL_PW_CTL_IDX_AUX_C),
I915_PW("AUX_D", &icl_pwdoms_aux_d, .hsw.idx = XELPD_PW_CTL_IDX_AUX_D),
I915_PW("AUX_E", &icl_pwdoms_aux_e, .hsw.idx = XELPD_PW_CTL_IDX_AUX_E),
+ ),
+ .ops = &icl_aux_power_well_ops,
+ .fixed_enable_delay = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
I915_PW("AUX_USBC1", &tgl_pwdoms_aux_usbc1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1),
I915_PW("AUX_USBC2", &tgl_pwdoms_aux_usbc2, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2),
I915_PW("AUX_USBC3", &tgl_pwdoms_aux_usbc3, .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3),
@@ -1385,6 +1399,8 @@ static const struct i915_power_well_desc xelpd_power_wells_main[] = {
),
.ops = &icl_aux_power_well_ops,
.fixed_enable_delay = true,
+ /* WA_14017248603: adlp */
+ .enable_timeout = 500,
}, {
.instances = &I915_PW_INSTANCES(
I915_PW("AUX_TBT1", &icl_pwdoms_aux_tbt1, .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1),
@@ -1400,6 +1416,34 @@ static const struct i915_power_well_desc xelpd_power_wells_main[] = {
static const struct i915_power_well_desc_list xelpd_power_wells[] = {
I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(xelpd_power_wells_dc_off),
+ I915_PW_DESCRIPTORS(xelpd_power_wells_main),
+};
+
+I915_DECL_PW_DOMAINS(xehpd_pwdoms_dc_off,
+ XELPD_PW_2_POWER_DOMAINS,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_AUDIO_MMIO,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_DC_OFF,
+ POWER_DOMAIN_INIT);
+
+static const struct i915_power_well_desc xehpd_power_wells_dc_off[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("DC_off", &xehpd_pwdoms_dc_off,
+ .id = SKL_DISP_DC_OFF),
+ ),
+ .ops = &gen9_dc_off_power_well_ops,
+ }
+};
+
+static const struct i915_power_well_desc_list xehpd_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(xehpd_power_wells_dc_off),
I915_PW_DESCRIPTORS(xelpd_power_wells_main),
};
@@ -1423,15 +1467,6 @@ I915_DECL_PW_DOMAINS(xelpdp_pwdoms_pw_2,
XELPDP_PW_2_POWER_DOMAINS,
POWER_DOMAIN_INIT);
-I915_DECL_PW_DOMAINS(xelpdp_pwdoms_dc_off,
- XELPDP_PW_2_POWER_DOMAINS,
- POWER_DOMAIN_AUDIO_MMIO,
- POWER_DOMAIN_MODESET,
- POWER_DOMAIN_AUX_A,
- POWER_DOMAIN_AUX_B,
- POWER_DOMAIN_DC_OFF,
- POWER_DOMAIN_INIT);
-
I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc1,
POWER_DOMAIN_AUX_USBC1,
POWER_DOMAIN_AUX_TBT1);
@@ -1451,12 +1486,6 @@ I915_DECL_PW_DOMAINS(xelpdp_pwdoms_aux_tc4,
static const struct i915_power_well_desc xelpdp_power_wells_main[] = {
{
.instances = &I915_PW_INSTANCES(
- I915_PW("DC_off", &xelpdp_pwdoms_dc_off,
- .id = SKL_DISP_DC_OFF),
- ),
- .ops = &gen9_dc_off_power_well_ops,
- }, {
- .instances = &I915_PW_INSTANCES(
I915_PW("PW_2", &xelpdp_pwdoms_pw_2,
.hsw.idx = ICL_PW_CTL_IDX_PW_2,
.id = SKL_DISP_PW_2),
@@ -1512,6 +1541,7 @@ static const struct i915_power_well_desc xelpdp_power_wells_main[] = {
static const struct i915_power_well_desc_list xelpdp_power_wells[] = {
I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(xelpd_power_wells_dc_off),
I915_PW_DESCRIPTORS(xelpdp_power_wells_main),
};
@@ -1624,6 +1654,8 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains)
if (DISPLAY_VER(i915) >= 14)
return set_power_wells(power_domains, xelpdp_power_wells);
+ else if (IS_DG2(i915))
+ return set_power_wells(power_domains, xehpd_power_wells);
else if (DISPLAY_VER(i915) >= 13)
return set_power_wells(power_domains, xelpd_power_wells);
else if (IS_DG1(i915))
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 62bafcbc7937..41eabdf3e871 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
+#include "i915_reg.h"
#include "intel_backlight_regs.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
@@ -253,6 +254,7 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
int pw_idx = i915_power_well_instance(power_well)->hsw.idx;
+ int timeout = power_well->desc->enable_timeout ? : 1;
/*
* For some power wells we're not supposed to watch the status bit for
@@ -266,7 +268,7 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
if (intel_de_wait_for_set(dev_priv, regs->driver,
- HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
+ HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) {
drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
intel_power_well_name(power_well));
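
The "?:" above is the GNU "elvis" operator, which reuses the tested value
when it is non-zero; power wells that leave enable_timeout unset fall back
to the 1 ms default. Expanded form:

	int timeout = power_well->desc->enable_timeout ?
		      power_well->desc->enable_timeout : 1;	/* ms */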
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
index ba7cb977e7c7..e494df379e6c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -110,6 +110,8 @@ struct i915_power_well_desc {
* Thunderbolt mode.
*/
u16 is_tc_tbt:1;
+ /* Enable timeout if greater than the default 1ms */
+ u16 enable_timeout;
};
struct i915_power_well {
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
new file mode 100644
index 000000000000..17178d5d7788
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_atomic_helper.h>
+
+#include "i915_drv.h"
+#include "intel_clock_gating.h"
+#include "intel_display_driver.h"
+#include "intel_display_reset.h"
+#include "intel_display_types.h"
+#include "intel_hotplug.h"
+#include "intel_pps.h"
+
+static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
+{
+ return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
+ intel_has_gpu_reset(to_gt(dev_priv)));
+}
+
+void intel_display_reset_prepare(struct drm_i915_private *dev_priv)
+{
+ struct drm_modeset_acquire_ctx *ctx = &dev_priv->display.restore.reset_ctx;
+ struct drm_atomic_state *state;
+ int ret;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ /* reset doesn't touch the display */
+ if (!dev_priv->params.force_reset_modeset_test &&
+ !gpu_reset_clobbers_display(dev_priv))
+ return;
+
+ /* We have a modeset vs reset deadlock, defensively unbreak it. */
+ set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
+ smp_mb__after_atomic();
+ wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);
+
+ if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Modeset potentially stuck, unbreaking through wedging\n");
+ intel_gt_set_wedged(to_gt(dev_priv));
+ }
+
+ /*
+ * Need mode_config.mutex so that we don't
+ * trample ongoing ->detect() and whatnot.
+ */
+ mutex_lock(&dev_priv->drm.mode_config.mutex);
+ drm_modeset_acquire_init(ctx, 0);
+ while (1) {
+ ret = drm_modeset_lock_all_ctx(&dev_priv->drm, ctx);
+ if (ret != -EDEADLK)
+ break;
+
+ drm_modeset_backoff(ctx);
+ }
+ /*
+ * Disabling the crtcs gracefully seems nicer. Also the
+ * g33 docs say we should at least disable all the planes.
+ */
+ state = drm_atomic_helper_duplicate_state(&dev_priv->drm, ctx);
+ if (IS_ERR(state)) {
+ ret = PTR_ERR(state);
+ drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
+ ret);
+ return;
+ }
+
+ ret = drm_atomic_helper_disable_all(&dev_priv->drm, ctx);
+ if (ret) {
+ drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
+ ret);
+ drm_atomic_state_put(state);
+ return;
+ }
+
+ dev_priv->display.restore.modeset_state = state;
+ state->acquire_ctx = ctx;
+}
+
+void intel_display_reset_finish(struct drm_i915_private *i915)
+{
+ struct drm_modeset_acquire_ctx *ctx = &i915->display.restore.reset_ctx;
+ struct drm_atomic_state *state;
+ int ret;
+
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ /* reset doesn't touch the display */
+ if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags))
+ return;
+
+ state = fetch_and_zero(&i915->display.restore.modeset_state);
+ if (!state)
+ goto unlock;
+
+ /* reset doesn't touch the display */
+ if (!gpu_reset_clobbers_display(i915)) {
+ /* for testing only restore the display */
+ ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
+ if (ret) {
+ drm_WARN_ON(&i915->drm, ret == -EDEADLK);
+ drm_err(&i915->drm,
+ "Restoring old state failed with %i\n", ret);
+ }
+ } else {
+ /*
+ * The display has been reset as well,
+ * so need a full re-initialization.
+ */
+ intel_pps_unlock_regs_wa(i915);
+ intel_display_driver_init_hw(i915);
+ intel_clock_gating_init(i915);
+ intel_hpd_init(i915);
+
+ ret = __intel_display_driver_resume(i915, state, ctx);
+ if (ret)
+ drm_err(&i915->drm,
+ "Restoring old state failed with %i\n", ret);
+
+ intel_hpd_poll_disable(i915);
+ }
+
+ drm_atomic_state_put(state);
+unlock:
+ drm_modeset_drop_locks(ctx);
+ drm_modeset_acquire_fini(ctx);
+ mutex_unlock(&i915->drm.mode_config.mutex);
+
+ clear_bit_unlock(I915_RESET_MODESET, &to_gt(i915)->reset.flags);
+}
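
A hypothetical call-site sketch showing how the pair is meant to bracket a
GPU reset (do_gpu_reset() stands in for the actual gt reset machinery):

	intel_display_reset_prepare(i915);	/* quiesce, duplicate state */
	do_gpu_reset(i915);			/* hypothetical primitive */
	intel_display_reset_finish(i915);	/* re-init hw, restore state */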
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.h b/drivers/gpu/drm/i915/display/intel_display_reset.h
new file mode 100644
index 000000000000..f06d0d35b86b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_RESET_H__
+#define __INTEL_RESET_H__
+
+struct drm_i915_private;
+
+void intel_display_reset_prepare(struct drm_i915_private *i915);
+void intel_display_reset_finish(struct drm_i915_private *i915);
+
+#endif /* __INTEL_RESET_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
index 651ea8564e1b..99bdb833591c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -14,7 +14,6 @@
#include <linux/tracepoint.h>
#include "i915_drv.h"
-#include "i915_irq.h"
#include "intel_crtc.h"
#include "intel_display_types.h"
#include "intel_vblank.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 47395b39c8f4..270c4c84a292 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -643,6 +643,9 @@ struct intel_atomic_state {
struct __intel_global_objs_state *global_objs;
int num_global_objs;
+ /* Internal commit, as opposed to userspace/client initiated one */
+ bool internal;
+
bool dpll_set, modeset;
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
@@ -980,6 +983,38 @@ struct intel_link_m_n {
u32 link_n;
};
+struct intel_csc_matrix {
+ u16 coeff[9];
+ u16 preoff[3];
+ u16 postoff[3];
+};
+
+struct intel_c10pll_state {
+ u32 clock; /* in KHz */
+ u8 tx;
+ u8 cmn;
+ u8 pll[20];
+};
+
+struct intel_c20pll_state {
+ u32 link_bit_rate;
+ u32 clock; /* in kHz */
+ u16 tx[3];
+ u16 cmn[4];
+ union {
+ u16 mplla[10];
+ u16 mpllb[11];
+ };
+};
+
+struct intel_cx0pll_state {
+ union {
+ struct intel_c10pll_state c10;
+ struct intel_c20pll_state c20;
+ };
+ bool ssc_enabled;
+};
+
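
intel_cx0pll_state carries either PLL flavour in a union, so a caller picks
the member by PHY type. A sketch mirroring the mtl_crtc_compute_clock()
change later in this patch (intel_is_c10phy() is the real predicate):

	if (intel_is_c10phy(i915, phy))
		clock = intel_c10pll_calc_port_clock(encoder, &pll_state->c10);
	else
		clock = intel_c20pll_calc_port_clock(encoder, &pll_state->c20);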
struct intel_crtc_state {
/*
* uapi (drm) state. This is the software state shown to userspace.
@@ -1021,6 +1056,8 @@ struct intel_crtc_state {
/* actual state of LUTs */
struct drm_property_blob *pre_csc_lut, *post_csc_lut;
+ struct intel_csc_matrix csc, output_csc;
+
/**
* quirks - bitfield with hw state readout quirks
*
@@ -1123,6 +1160,7 @@ struct intel_crtc_state {
union {
struct intel_dpll_hw_state dpll_hw_state;
struct intel_mpllb_state mpllb_state;
+ struct intel_cx0pll_state cx0pll_state;
};
/*
@@ -1275,9 +1313,18 @@ struct intel_crtc_state {
/* HDMI High TMDS char rate ratio */
bool hdmi_high_tmds_clock_ratio;
- /* Output format RGB/YCBCR etc */
+ /*
+ * Output format RGB/YCBCR etc., that is coming out
+ * at the end of the pipe.
+ */
enum intel_output_format output_format;
+ /*
+ * Sink output format RGB/YCBCR etc., that is going
+ * into the sink.
+ */
+ enum intel_output_format sink_format;
+
/* enable pipe gamma? */
bool gamma_enable;
@@ -1728,6 +1775,7 @@ struct intel_dp {
int pcon_max_frl_bw;
u8 max_bpc;
bool ycbcr_444_to_420;
+ bool ycbcr420_passthrough;
bool rgb_to_ycbcr;
} dfp;
@@ -1814,10 +1862,6 @@ struct intel_dp_mst_encoder {
struct intel_connector *connector;
};
-struct intel_load_detect_pipe {
- struct drm_atomic_state *restore_state;
-};
-
static inline struct intel_encoder *
intel_attached_encoder(struct intel_connector *connector)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.c b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
index 57cc3edba016..a001232ad445 100644
--- a/drivers/gpu/drm/i915/display/intel_dkl_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.c
@@ -11,6 +11,15 @@
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
+/**
+ * intel_dkl_phy_init - initialize Dekel PHY
+ * @i915: i915 device instance
+ */
+void intel_dkl_phy_init(struct drm_i915_private *i915)
+{
+ spin_lock_init(&i915->display.dkl.phy_lock);
+}
+
static void
dkl_phy_set_hip_idx(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.h b/drivers/gpu/drm/i915/display/intel_dkl_phy.h
index 570ee36f9386..5956ec3e940b 100644
--- a/drivers/gpu/drm/i915/display/intel_dkl_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.h
@@ -12,6 +12,7 @@
struct drm_i915_private;
+void intel_dkl_phy_init(struct drm_i915_private *i915);
u32
intel_dkl_phy_read(struct drm_i915_private *i915, struct intel_dkl_phy_reg reg);
void
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f0bace9d98a1..0cc57681dc4d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -44,8 +44,8 @@
#include <drm/drm_probe_helper.h>
#include "g4x_dp.h"
-#include "i915_debugfs.h"
#include "i915_drv.h"
+#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -53,6 +53,7 @@
#include "intel_combo_phy_regs.h"
#include "intel_connector.h"
#include "intel_crtc.h"
+#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -421,6 +422,18 @@ static int ehl_max_source_rate(struct intel_dp *intel_dp)
return 810000;
}
+static int mtl_max_source_rate(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+
+ if (intel_is_c10phy(i915, phy))
+ return intel_dp_is_edp(intel_dp) ? 675000 : 810000;
+
+ return 2000000;
+}
+
static int vbt_max_link_rate(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
@@ -445,6 +458,10 @@ static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
/* The values must be in increasing order */
+ static const int mtl_rates[] = {
+ 162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
+ 810000, 1000000, 1350000, 2000000,
+ };
static const int icl_rates[] = {
162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
1000000, 1350000,
@@ -470,7 +487,11 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
drm_WARN_ON(&dev_priv->drm,
intel_dp->source_rates || intel_dp->num_source_rates);
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ source_rates = mtl_rates;
+ size = ARRAY_SIZE(mtl_rates);
+ max_rate = mtl_max_source_rate(intel_dp);
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
source_rates = icl_rates;
size = ARRAY_SIZE(icl_rates);
if (IS_DG2(dev_priv))
@@ -828,26 +849,88 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return 0;
}
+static bool source_can_output(struct intel_dp *intel_dp,
+ enum intel_output_format format)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ switch (format) {
+ case INTEL_OUTPUT_FORMAT_RGB:
+ return true;
+
+ case INTEL_OUTPUT_FORMAT_YCBCR444:
+ /*
+ * No YCbCr output support on gmch platforms.
+ * Also, ILK doesn't seem capable of DP YCbCr output.
+ * The displayed image is severely corrupted. SNB+ is fine.
+ */
+ return !HAS_GMCH(i915) && !IS_IRONLAKE(i915);
+
+ case INTEL_OUTPUT_FORMAT_YCBCR420:
+ /* Platform < Gen 11 cannot output YCbCr420 format */
+ return DISPLAY_VER(i915) >= 11;
+
+ default:
+ MISSING_CASE(format);
+ return false;
+ }
+}
+
+static bool
+dfp_can_convert_from_rgb(struct intel_dp *intel_dp,
+ enum intel_output_format sink_format)
+{
+ if (!drm_dp_is_branch(intel_dp->dpcd))
+ return false;
+
+ if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+ return intel_dp->dfp.rgb_to_ycbcr;
+
+ if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ return intel_dp->dfp.rgb_to_ycbcr &&
+ intel_dp->dfp.ycbcr_444_to_420;
+
+ return false;
+}
+
+static bool
+dfp_can_convert_from_ycbcr444(struct intel_dp *intel_dp,
+ enum intel_output_format sink_format)
+{
+ if (!drm_dp_is_branch(intel_dp->dpcd))
+ return false;
+
+ if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ return intel_dp->dfp.ycbcr_444_to_420;
+
+ return false;
+}
+
static enum intel_output_format
intel_dp_output_format(struct intel_connector *connector,
- bool ycbcr_420_output)
+ enum intel_output_format sink_format)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ enum intel_output_format output_format;
if (intel_dp->force_dsc_output_format)
return intel_dp->force_dsc_output_format;
- if (!connector->base.ycbcr_420_allowed || !ycbcr_420_output)
- return INTEL_OUTPUT_FORMAT_RGB;
+ if (sink_format == INTEL_OUTPUT_FORMAT_RGB ||
+ dfp_can_convert_from_rgb(intel_dp, sink_format))
+ output_format = INTEL_OUTPUT_FORMAT_RGB;
- if (intel_dp->dfp.rgb_to_ycbcr &&
- intel_dp->dfp.ycbcr_444_to_420)
- return INTEL_OUTPUT_FORMAT_RGB;
+ else if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR444 ||
+ dfp_can_convert_from_ycbcr444(intel_dp, sink_format))
+ output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
- if (intel_dp->dfp.ycbcr_444_to_420)
- return INTEL_OUTPUT_FORMAT_YCBCR444;
else
- return INTEL_OUTPUT_FORMAT_YCBCR420;
+ output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
+
+ drm_WARN_ON(&i915->drm, !source_can_output(intel_dp, output_format));
+
+ return output_format;
}
int intel_dp_min_bpp(enum intel_output_format output_format)
@@ -871,13 +954,27 @@ static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
return bpp;
}
+static enum intel_output_format
+intel_dp_sink_format(struct intel_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ const struct drm_display_info *info = &connector->base.display_info;
+
+ if (drm_mode_is_420_only(info, mode))
+ return INTEL_OUTPUT_FORMAT_YCBCR420;
+
+ return INTEL_OUTPUT_FORMAT_RGB;
+}
+
static int
intel_dp_mode_min_output_bpp(struct intel_connector *connector,
const struct drm_display_mode *mode)
{
- const struct drm_display_info *info = &connector->base.display_info;
- enum intel_output_format output_format =
- intel_dp_output_format(connector, drm_mode_is_420_only(info, mode));
+ enum intel_output_format output_format, sink_format;
+
+ sink_format = intel_dp_sink_format(connector, mode);
+
+ output_format = intel_dp_output_format(connector, sink_format);
return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
}
@@ -916,7 +1013,8 @@ static int intel_dp_max_tmds_clock(struct intel_dp *intel_dp)
static enum drm_mode_status
intel_dp_tmds_clock_valid(struct intel_dp *intel_dp,
- int clock, int bpc, bool ycbcr420_output,
+ int clock, int bpc,
+ enum intel_output_format sink_format,
bool respect_downstream_limits)
{
int tmds_clock, min_tmds_clock, max_tmds_clock;
@@ -924,7 +1022,7 @@ intel_dp_tmds_clock_valid(struct intel_dp *intel_dp,
if (!respect_downstream_limits)
return MODE_OK;
- tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output);
+ tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format);
min_tmds_clock = intel_dp->dfp.min_tmds_clock;
max_tmds_clock = intel_dp_max_tmds_clock(intel_dp);
@@ -946,7 +1044,7 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
struct intel_dp *intel_dp = intel_attached_dp(connector);
const struct drm_display_info *info = &connector->base.display_info;
enum drm_mode_status status;
- bool ycbcr_420_only;
+ enum intel_output_format sink_format;
/* If PCON supports FRL MODE, check FRL bandwidth constraints */
if (intel_dp->dfp.pcon_max_frl_bw) {
@@ -971,20 +1069,20 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
target_clock > intel_dp->dfp.max_dotclock)
return MODE_CLOCK_HIGH;
- ycbcr_420_only = drm_mode_is_420_only(info, mode);
+ sink_format = intel_dp_sink_format(connector, mode);
/* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
- 8, ycbcr_420_only, true);
+ 8, sink_format, true);
if (status != MODE_OK) {
- if (ycbcr_420_only ||
+ if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
!connector->base.ycbcr_420_allowed ||
!drm_mode_is_420_also(info, mode))
return status;
-
+ sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
status = intel_dp_tmds_clock_valid(intel_dp, target_clock,
- 8, true, true);
+ 8, sink_format, true);
if (status != MODE_OK)
return status;
}
@@ -1041,6 +1139,9 @@ intel_dp_mode_valid(struct drm_connector *_connector,
if (target_clock > max_dotclk)
return MODE_CLOCK_HIGH;
+ if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
+ return MODE_H_ILLEGAL;
+
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
@@ -1048,13 +1149,6 @@ intel_dp_mode_valid(struct drm_connector *_connector,
mode_rate = intel_dp_link_required(target_clock,
intel_dp_mode_min_output_bpp(connector, mode));
- if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
- return MODE_H_ILLEGAL;
-
- /*
- * Output bpp is stored in 6.4 format so right shift by 4 to get the
- * integer value since we support only integer values of bpp.
- */
if (HAS_DSC(dev_priv) &&
drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
/*
@@ -1063,6 +1157,10 @@ intel_dp_mode_valid(struct drm_connector *_connector,
*/
int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX);
+ /*
+ * Output bpp is stored in 6.4 format so right shift by 4 to get the
+ * integer value since we support only integer values of bpp.
+ */
if (intel_dp_is_edp(intel_dp)) {
dsc_max_output_bpp =
drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
@@ -1220,19 +1318,10 @@ static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
}
-static bool intel_dp_is_ycbcr420(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
- (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
- intel_dp->dfp.ycbcr_444_to_420);
-}
-
static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
int bpc, bool respect_downstream_limits)
{
- bool ycbcr420_output = intel_dp_is_ycbcr420(intel_dp, crtc_state);
int clock = crtc_state->hw.adjusted_mode.crtc_clock;
/*
@@ -1252,8 +1341,8 @@ static int intel_dp_hdmi_compute_bpc(struct intel_dp *intel_dp,
for (; bpc >= 8; bpc -= 2) {
if (intel_hdmi_bpc_possible(crtc_state, bpc,
- intel_dp->has_hdmi_sink, ycbcr420_output) &&
- intel_dp_tmds_clock_valid(intel_dp, clock, bpc, ycbcr420_output,
+ intel_dp->has_hdmi_sink) &&
+ intel_dp_tmds_clock_valid(intel_dp, clock, bpc, crtc_state->sink_format,
respect_downstream_limits) == MODE_OK)
return bpc;
}
@@ -1601,6 +1690,11 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
pipe_config->dsc.slice_count =
drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
true);
+ if (!pipe_config->dsc.slice_count) {
+ drm_dbg_kms(&dev_priv->drm, "Unsupported Slice Count %d\n",
+ pipe_config->dsc.slice_count);
+ return -EINVAL;
+ }
} else {
u16 dsc_max_output_bpp = 0;
u8 dsc_dp_slice_count;
@@ -2092,23 +2186,27 @@ intel_dp_compute_output_format(struct intel_encoder *encoder,
ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);
- crtc_state->output_format = intel_dp_output_format(connector, ycbcr_420_only);
-
- if (ycbcr_420_only && !intel_dp_is_ycbcr420(intel_dp, crtc_state)) {
+ if (ycbcr_420_only && !connector->base.ycbcr_420_allowed) {
drm_dbg_kms(&i915->drm,
"YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
- crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
+ } else {
+ crtc_state->sink_format = intel_dp_sink_format(connector, adjusted_mode);
}
+ crtc_state->output_format = intel_dp_output_format(connector, crtc_state->sink_format);
+
ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
respect_downstream_limits);
if (ret) {
- if (intel_dp_is_ycbcr420(intel_dp, crtc_state) ||
+ if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
!connector->base.ycbcr_420_allowed ||
!drm_mode_is_420_also(info, adjusted_mode))
return ret;
- crtc_state->output_format = intel_dp_output_format(connector, true);
+ crtc_state->sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
+ crtc_state->output_format = intel_dp_output_format(connector,
+ crtc_state->sink_format);
ret = intel_dp_compute_link_config(encoder, crtc_state, conn_state,
respect_downstream_limits);
}
@@ -2790,6 +2888,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ bool ycbcr444_to_420 = false;
+ bool rgb_to_ycbcr = false;
u8 tmp;
if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
@@ -2806,8 +2906,35 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n",
str_enable_disable(intel_dp->has_hdmi_sink));
- tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
- intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
+ if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ switch (crtc_state->output_format) {
+ case INTEL_OUTPUT_FORMAT_YCBCR420:
+ break;
+ case INTEL_OUTPUT_FORMAT_YCBCR444:
+ ycbcr444_to_420 = true;
+ break;
+ case INTEL_OUTPUT_FORMAT_RGB:
+ rgb_to_ycbcr = true;
+ ycbcr444_to_420 = true;
+ break;
+ default:
+ MISSING_CASE(crtc_state->output_format);
+ break;
+ }
+ } else if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
+ switch (crtc_state->output_format) {
+ case INTEL_OUTPUT_FORMAT_YCBCR444:
+ break;
+ case INTEL_OUTPUT_FORMAT_RGB:
+ rgb_to_ycbcr = true;
+ break;
+ default:
+ MISSING_CASE(crtc_state->output_format);
+ break;
+ }
+ }
+
+ tmp = ycbcr444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
@@ -2815,13 +2942,12 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
"Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n",
str_enable_disable(intel_dp->dfp.ycbcr_444_to_420));
- tmp = intel_dp->dfp.rgb_to_ycbcr ?
- DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0;
+ tmp = rgb_to_ycbcr ? DP_CONVERSION_BT709_RGB_YCBCR_ENABLE : 0;
if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0)
drm_dbg_kms(&i915->drm,
- "Failed to %s protocol converter RGB->YCbCr conversion mode\n",
- str_enable_disable(tmp));
+ "Failed to %s protocol converter RGB->YCbCr conversion mode\n",
+ str_enable_disable(tmp));
}
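
The two switch ladders above reduce to the following (sink_format,
output_format) -> conversion-flag mapping:

	sink 4:2:0 / output 4:2:0 -> passthrough, no conversion
	sink 4:2:0 / output 4:4:4 -> ycbcr444_to_420
	sink 4:2:0 / output RGB   -> rgb_to_ycbcr + ycbcr444_to_420
	sink 4:4:4 / output RGB   -> rgb_to_ycbcr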
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
@@ -4611,57 +4737,44 @@ intel_dp_update_dfp(struct intel_dp *intel_dp,
intel_dp_get_pcon_dsc_cap(intel_dp);
}
+static bool
+intel_dp_can_ycbcr420(struct intel_dp *intel_dp)
+{
+ if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420) &&
+ (!drm_dp_is_branch(intel_dp->dpcd) || intel_dp->dfp.ycbcr420_passthrough))
+ return true;
+
+ if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_RGB) &&
+ dfp_can_convert_from_rgb(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420))
+ return true;
+
+ if (source_can_output(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR444) &&
+ dfp_can_convert_from_ycbcr444(intel_dp, INTEL_OUTPUT_FORMAT_YCBCR420))
+ return true;
+
+ return false;
+}
+
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr;
-
- /* No YCbCr output support on gmch platforms */
- if (HAS_GMCH(i915))
- return;
-
- /*
- * ILK doesn't seem capable of DP YCbCr output. The
- * displayed image is severly corrupted. SNB+ is fine.
- */
- if (IS_IRONLAKE(i915))
- return;
- is_branch = drm_dp_is_branch(intel_dp->dpcd);
- ycbcr_420_passthrough =
+ intel_dp->dfp.ycbcr420_passthrough =
drm_dp_downstream_420_passthrough(intel_dp->dpcd,
intel_dp->downstream_ports);
/* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */
- ycbcr_444_to_420 =
+ intel_dp->dfp.ycbcr_444_to_420 =
dp_to_dig_port(intel_dp)->lspcon.active ||
drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
intel_dp->downstream_ports);
- rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
- intel_dp->downstream_ports,
- DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
-
- if (DISPLAY_VER(i915) >= 11) {
- /* Let PCON convert from RGB->YCbCr if possible */
- if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) {
- intel_dp->dfp.rgb_to_ycbcr = true;
- intel_dp->dfp.ycbcr_444_to_420 = true;
- connector->base.ycbcr_420_allowed = true;
- } else {
- /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
- intel_dp->dfp.ycbcr_444_to_420 =
- ycbcr_444_to_420 && !ycbcr_420_passthrough;
-
- connector->base.ycbcr_420_allowed =
- !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
- }
- } else {
- /* 4:4:4->4:2:0 conversion is the only way */
- intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;
+ intel_dp->dfp.rgb_to_ycbcr =
+ drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd,
+ intel_dp->downstream_ports,
+ DP_DS_HDMI_BT709_RGB_YCBCR_CONV);
- connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
- }
+ connector->base.ycbcr_420_allowed = intel_dp_can_ycbcr420(intel_dp);
drm_dbg_kms(&i915->drm,
"[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
@@ -5365,6 +5478,15 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
goto out_vdd_off;
}
+ /*
+ * Enable HPD sense for live status check.
+ * intel_hpd_irq_setup() will turn it off again
+ * if it's no longer needed later.
+ *
+ * The DPCD probe below will make sure VDD is on.
+ */
+ intel_hpd_enable_detection(encoder);
+
/* Cache DPCD and EDID for edp. */
has_dpcd = intel_edp_init_dpcd(intel_dp);
@@ -5376,6 +5498,24 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
goto out_vdd_off;
}
+ /*
+ * VBT and straps are liars. Also check HPD as that seems
+ * to be the most reliable piece of information available.
+ */
+ if (!intel_digital_port_connected(encoder)) {
+ /*
+ * If this fails, presume the DPCD answer came
+ * from some other port using the same AUX CH.
+ *
+ * FIXME maybe cleaner to check this before the
+ * DPCD read? Would need to sort out the VDD handling...
+ */
+ drm_info(&dev_priv->drm,
+ "[ENCODER:%d:%s] HPD is down, disabling eDP\n",
+ encoder->base.base.id, encoder->base.name);
+ goto out_vdd_off;
+ }
+
mutex_lock(&dev_priv->drm.mode_config.mutex);
drm_edid = drm_edid_read_ddc(connector, &intel_dp->aux.ddc);
if (!drm_edid) {
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 705915d50565..abf77ba76972 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -268,6 +268,11 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
intel_pps_check_power_unlocked(intel_dp);
+ /*
+ * FIXME PSR should be disabled here to prevent
+ * it using the same AUX CH simultaneously
+ */
+
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
status = intel_de_read_notrace(i915, ch_ctl);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index d638054c74ac..e92c62bcc9b8 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -637,6 +637,38 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
return true;
}
+static void
+intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 link_config[2];
+
+ link_config[0] = crtc_state->vrr.flipline ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
+ link_config[1] = intel_dp_is_uhbr(crtc_state) ?
+ DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;
+ drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
+}
+
+static void
+intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ u8 link_bw, u8 rate_select)
+{
+ u8 link_config[2];
+
+ /* Write the link configuration data */
+ link_config[0] = link_bw;
+ link_config[1] = crtc_state->lane_count;
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
+
+ /* eDP 1.4 rate select method. */
+ if (!link_bw)
+ drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
+ &rate_select, 1);
+}
+
/*
* Prepare link training by configuring the link parameters. On DDI platforms
* also enable the port here.
@@ -647,7 +679,6 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- u8 link_config[2];
u8 link_bw, rate_select;
if (intel_dp->prepare_link_retrain)
@@ -686,23 +717,13 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s] Using LINK_RATE_SET value %02x\n",
encoder->base.base.id, encoder->base.name, rate_select);
-
- /* Write the link configuration data */
- link_config[0] = link_bw;
- link_config[1] = crtc_state->lane_count;
- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
- link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
- drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
-
- /* eDP 1.4 rate select method. */
- if (!link_bw)
- drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
- &rate_select, 1);
-
- link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
- link_config[1] = intel_dp_is_uhbr(crtc_state) ?
- DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;
- drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
+ /*
+ * DP 2.1 spec, section 3.5.2.16: prior to link training, the DPTX
+ * should set the 128b/132b DP channel coding and then the link rate.
+ */
+ intel_dp_update_downspread_ctrl(intel_dp, crtc_state);
+ intel_dp_update_link_bw_set(intel_dp, crtc_state, link_bw,
+ rate_select);
return true;
}
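
Two things change behaviourally here besides the split into helpers: the
DPCD writes are reordered so that DP_DOWNSPREAD_CTRL (which also selects
128b/132b vs 8b/10b channel coding) is programmed before
DP_LINK_BW_SET/DP_LINK_RATE_SET, per the DP 2.1 note above, and
DP_MSA_TIMING_PAR_IGNORE_EN is now keyed off crtc_state->vrr.flipline
rather than vrr.enable.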
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 2c49d9ab86a2..63d61e610210 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -318,6 +318,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
+ pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 4e9c18be7e1f..ca0f362a40e3 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -8,6 +8,7 @@
#include "i915_reg.h"
#include "intel_crtc.h"
+#include "intel_cx0_phy.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_types.h"
@@ -995,6 +996,32 @@ static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
+static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_encoder *encoder =
+ intel_get_crtc_new_encoder(state, crtc_state);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+ int ret;
+
+ ret = intel_cx0pll_calc_state(crtc_state, encoder);
+ if (ret)
+ return ret;
+
+ /* TODO: Do the readback via intel_compute_shared_dplls() */
+ if (intel_is_c10phy(i915, phy))
+ crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10);
+ else
+ crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20);
+
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return 0;
+}
+
static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
{
return dpll->m < factor * dpll->n;
@@ -1423,6 +1450,10 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
+static const struct intel_dpll_funcs mtl_dpll_funcs = {
+ .crtc_compute_clock = mtl_crtc_compute_clock,
+};
+
static const struct intel_dpll_funcs dg2_dpll_funcs = {
.crtc_compute_clock = dg2_crtc_compute_clock,
};
@@ -1517,7 +1548,9 @@ int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
void
intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
{
- if (IS_DG2(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 14)
+ dev_priv->display.funcs.dpll = &mtl_dpll_funcs;
+ else if (IS_DG2(dev_priv))
dev_priv->display.funcs.dpll = &dg2_dpll_funcs;
else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
dev_priv->display.funcs.dpll = &hsw_dpll_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 22fc908b7e5d..ed372d227aa7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -4104,7 +4104,7 @@ void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->display.dpll.lock);
- if (IS_DG2(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 14 || IS_DG2(dev_priv))
/* No shared DPLLs on DG2 or display ver 14+; port PLLs are part of the PHY */
dpll_mgr = NULL;
else if (IS_ALDERLAKE_P(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index eb2dcd866cc8..9884678743b6 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -271,6 +271,7 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
+ pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index b507ff944864..11bb8cf9c9d0 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -44,6 +44,7 @@
#include <drm/drm_fourcc.h>
#include "i915_drv.h"
+#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_cdclk.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index b708a62e509a..e7f77a225739 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -26,6 +26,8 @@
*/
#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_irq.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index a690a5616506..61a2b63ab4d2 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -40,12 +40,13 @@
#include <drm/drm_edid.h>
#include <drm/intel_lpe_audio.h>
-#include "i915_debugfs.h"
+#include "g4x_hdmi.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
+#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -58,7 +59,7 @@
#include "intel_panel.h"
#include "intel_snps_phy.h"
-static struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi)
+inline struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi)
{
return to_i915(hdmi_to_dig_port(intel_hdmi)->base.base.dev);
}
@@ -1865,16 +1866,19 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
* FIXME: We will hopefully get an algorithmic way of programming
* the MPLLB for HDMI in the future.
*/
- if (IS_DG2(dev_priv))
+ if (DISPLAY_VER(dev_priv) >= 14)
+ return intel_cx0_phy_check_hdmi_link_rate(hdmi, clock);
+ else if (IS_DG2(dev_priv))
return intel_snps_phy_check_hdmi_link_rate(clock);
return MODE_OK;
}
-int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output)
+int intel_hdmi_tmds_clock(int clock, int bpc,
+ enum intel_output_format sink_format)
{
/* YCBCR420 TMDS rate requirement is half the pixel clock */
- if (ycbcr420_output)
+ if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420)
clock /= 2;
/*
@@ -1901,7 +1905,8 @@ static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bp
}
static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
- int bpc, bool has_hdmi_sink, bool ycbcr420_output)
+ int bpc, bool has_hdmi_sink,
+ enum intel_output_format sink_format)
{
const struct drm_display_info *info = &connector->display_info;
const struct drm_hdmi_info *hdmi = &info->hdmi;
@@ -1911,7 +1916,7 @@ static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
if (!has_hdmi_sink)
return false;
- if (ycbcr420_output)
+ if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420)
return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36;
else
return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_36;
@@ -1919,7 +1924,7 @@ static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
if (!has_hdmi_sink)
return false;
- if (ycbcr420_output)
+ if (sink_format == INTEL_OUTPUT_FORMAT_YCBCR420)
return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_30;
else
return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30;
@@ -1933,7 +1938,8 @@ static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector,
static enum drm_mode_status
intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
- bool has_hdmi_sink, bool ycbcr420_output)
+ bool has_hdmi_sink,
+ enum intel_output_format sink_format)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
@@ -1946,12 +1952,12 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
* least one color depth is accepted.
*/
for (bpc = 12; bpc >= 8; bpc -= 2) {
- int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output);
+ int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format);
if (!intel_hdmi_source_bpc_possible(i915, bpc))
continue;
- if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output))
+ if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, sink_format))
continue;
status = hdmi_port_clock_valid(hdmi, tmds_clock, true, has_hdmi_sink);
@@ -1976,6 +1982,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
bool ycbcr_420_only;
+ enum intel_output_format sink_format;
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
clock *= 2;
@@ -2000,14 +2007,20 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, mode);
- status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, ycbcr_420_only);
+ if (ycbcr_420_only)
+ sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
+ else
+ sink_format = INTEL_OUTPUT_FORMAT_RGB;
+
+ status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, sink_format);
if (status != MODE_OK) {
if (ycbcr_420_only ||
!connector->ycbcr_420_allowed ||
!drm_mode_is_420_also(&connector->display_info, mode))
return status;
- status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, true);
+ sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
+ status = intel_hdmi_mode_clock_valid(connector, clock, has_hdmi_sink, sink_format);
if (status != MODE_OK)
return status;
}
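
The sink_format threaded through above ends up in intel_hdmi_tmds_clock(); as a worked illustration of why the fallback to 4:2:0 can rescue an otherwise too-fast mode (clock value assumed for illustration, and assuming the usual bpc/8 deep-color scaling in the elided tail of that function):

	/* 4k60-class mode, clock = 594000 kHz */
	intel_hdmi_tmds_clock(594000,  8, INTEL_OUTPUT_FORMAT_RGB);      /* 594000 kHz */
	intel_hdmi_tmds_clock(594000,  8, INTEL_OUTPUT_FORMAT_YCBCR420); /* 297000 kHz: half rate */
	intel_hdmi_tmds_clock(594000, 12, INTEL_OUTPUT_FORMAT_YCBCR420); /* 445500 kHz: 297000 * 12 / 8 */
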
@@ -2016,7 +2029,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
}
bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
- int bpc, bool has_hdmi_sink, bool ycbcr420_output)
+ int bpc, bool has_hdmi_sink)
{
struct drm_atomic_state *state = crtc_state->uapi.state;
struct drm_connector_state *connector_state;
@@ -2027,7 +2040,8 @@ bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
if (connector_state->crtc != crtc_state->uapi.crtc)
continue;
- if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output))
+ if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink,
+ crtc_state->sink_format))
return false;
}
@@ -2051,8 +2065,7 @@ static bool hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, int bpc
adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
- return intel_hdmi_bpc_possible(crtc_state, bpc, crtc_state->has_hdmi_sink,
- intel_hdmi_is_ycbcr420(crtc_state));
+ return intel_hdmi_bpc_possible(crtc_state, bpc, crtc_state->has_hdmi_sink);
}
static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
@@ -2060,7 +2073,6 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
int clock, bool respect_downstream_limits)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- bool ycbcr420_output = intel_hdmi_is_ycbcr420(crtc_state);
int bpc;
/*
@@ -2078,7 +2090,8 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
bpc = 8;
for (; bpc >= 8; bpc -= 2) {
- int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output);
+ int tmds_clock = intel_hdmi_tmds_clock(clock, bpc,
+ crtc_state->sink_format);
if (hdmi_bpc_possible(crtc_state, bpc) &&
hdmi_port_clock_valid(intel_hdmi, tmds_clock,
@@ -2108,7 +2121,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
return bpc;
crtc_state->port_clock =
- intel_hdmi_tmds_clock(clock, bpc, intel_hdmi_is_ycbcr420(crtc_state));
+ intel_hdmi_tmds_clock(clock, bpc, crtc_state->sink_format);
/*
* pipe_bpp could already be below 8bpc due to
@@ -2170,9 +2183,9 @@ static bool intel_hdmi_has_audio(struct intel_encoder *encoder,
}
static enum intel_output_format
-intel_hdmi_output_format(const struct intel_crtc_state *crtc_state,
- struct intel_connector *connector,
- bool ycbcr_420_output)
+intel_hdmi_sink_format(const struct intel_crtc_state *crtc_state,
+ struct intel_connector *connector,
+ bool ycbcr_420_output)
{
if (!crtc_state->has_hdmi_sink)
return INTEL_OUTPUT_FORMAT_RGB;
@@ -2183,6 +2196,12 @@ intel_hdmi_output_format(const struct intel_crtc_state *crtc_state,
return INTEL_OUTPUT_FORMAT_RGB;
}
+static enum intel_output_format
+intel_hdmi_output_format(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->sink_format;
+}
+
static int intel_hdmi_compute_output_format(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state,
@@ -2195,23 +2214,26 @@ static int intel_hdmi_compute_output_format(struct intel_encoder *encoder,
bool ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);
int ret;
- crtc_state->output_format =
- intel_hdmi_output_format(crtc_state, connector, ycbcr_420_only);
+ crtc_state->sink_format =
+ intel_hdmi_sink_format(crtc_state, connector, ycbcr_420_only);
- if (ycbcr_420_only && !intel_hdmi_is_ycbcr420(crtc_state)) {
+ if (ycbcr_420_only && crtc_state->sink_format != INTEL_OUTPUT_FORMAT_YCBCR420) {
drm_dbg_kms(&i915->drm,
"YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
- crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
}
+ crtc_state->output_format = intel_hdmi_output_format(crtc_state);
ret = intel_hdmi_compute_clock(encoder, crtc_state, respect_downstream_limits);
if (ret) {
- if (intel_hdmi_is_ycbcr420(crtc_state) ||
+ if (crtc_state->sink_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ !crtc_state->has_hdmi_sink ||
!connector->base.ycbcr_420_allowed ||
!drm_mode_is_420_also(info, adjusted_mode))
return ret;
- crtc_state->output_format = intel_hdmi_output_format(crtc_state, connector, true);
+ crtc_state->sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
+ crtc_state->output_format = intel_hdmi_output_format(crtc_state);
ret = intel_hdmi_compute_clock(encoder, crtc_state, respect_downstream_limits);
}
@@ -2243,11 +2265,20 @@ static bool source_supports_scrambling(struct intel_encoder *encoder)
return intel_hdmi_source_max_tmds_clock(encoder) > 340000;
}
+bool intel_hdmi_compute_has_hdmi_sink(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
+
+ return intel_has_hdmi_sink(hdmi, conn_state) &&
+ !intel_hdmi_is_cloned(crtc_state);
+}
+
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_connector *connector = conn_state->connector;
@@ -2262,9 +2293,6 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
return -EINVAL;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- pipe_config->has_hdmi_sink =
- intel_has_hdmi_sink(intel_hdmi, conn_state) &&
- !intel_hdmi_is_cloned(pipe_config);
if (pipe_config->has_hdmi_sink)
pipe_config->has_infoframe = true;
@@ -2597,10 +2625,21 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
+static int intel_hdmi_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_i915_private *i915 = to_i915(state->dev);
+
+ if (HAS_DDI(i915))
+ return intel_digital_connector_atomic_check(connector, state);
+ else
+ return g4x_hdmi_connector_atomic_check(connector, state);
+}
+
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
.get_modes = intel_hdmi_get_modes,
.mode_valid = intel_hdmi_mode_valid,
- .atomic_check = intel_digital_connector_atomic_check,
+ .atomic_check = intel_hdmi_connector_atomic_check,
};
static void
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 774dda2376ed..6b39df38d57a 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
enum hdmi_infoframe_type;
+enum intel_output_format;
enum port;
struct drm_connector;
struct drm_connector_state;
@@ -23,6 +24,9 @@ union hdmi_infoframe;
void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector);
+bool intel_hdmi_compute_has_hdmi_sink(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
@@ -45,8 +49,8 @@ void intel_read_infoframe(struct intel_encoder *encoder,
bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
- int bpc, bool has_hdmi_sink, bool ycbcr420_output);
-int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output);
+ int bpc, bool has_hdmi_sink);
+int intel_hdmi_tmds_clock(int clock, int bpc, enum intel_output_format sink_format);
int intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width,
int num_slices, int output_format, bool hdmi_all_bpp,
int hdmi_max_chunk_bytes);
@@ -54,5 +58,6 @@ int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
int src_max_slices, int src_max_slice_width,
int hdmi_max_slices, int hdmi_throughput);
int intel_hdmi_dsc_get_slice_height(int vactive);
+struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi);
#endif /* __INTEL_HDMI_H__ */
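
A note on the new sink_format plumbing exposed here: for native HDMI the two fields track each other (intel_hdmi_output_format() above simply returns sink_format), but keeping them separate leaves room for cases where the pipe output and what the sink ultimately receives differ, e.g. format-converting DP branch devices — that use case is an assumption about where the split pays off, not something shown in this diff:

	/* HDMI today, per the .c changes above: the fields stay in lockstep */
	crtc_state->sink_format = INTEL_OUTPUT_FORMAT_YCBCR420;
	crtc_state->output_format = intel_hdmi_output_format(crtc_state); /* == sink_format */
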
diff --git a/drivers/gpu/drm/i915/display/intel_load_detect.c b/drivers/gpu/drm/i915/display/intel_load_detect.c
new file mode 100644
index 000000000000..d5a0aecf3e8f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_load_detect.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_crtc.h"
+#include "intel_display_types.h"
+#include "intel_load_detect.h"
+
+/* VESA 640x480x72Hz mode to set on the pipe */
+static const struct drm_display_mode load_detect_mode = {
+ DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+};
+
+static int intel_modeset_disable_planes(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ int ret, i;
+
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
+ if (plane_state->crtc != crtc)
+ continue;
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+ if (ret)
+ return ret;
+
+ drm_atomic_set_fb_for_plane(plane_state, NULL);
+ }
+
+ return 0;
+}
+
+struct drm_atomic_state *
+intel_load_detect_get_pipe(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct intel_encoder *encoder =
+ intel_attached_encoder(to_intel_connector(connector));
+ struct intel_crtc *possible_crtc;
+ struct intel_crtc *crtc = NULL;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_atomic_state *state = NULL, *restore_state = NULL;
+ struct drm_connector_state *connector_state;
+ struct intel_crtc_state *crtc_state;
+ int ret;
+
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, connector->name,
+ encoder->base.base.id, encoder->base.name);
+
+ drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
+
+ /*
+ * Algorithm gets a little messy:
+ *
+ * - if the connector already has an assigned crtc, use it (but make
+ * sure it's on first)
+ *
+ * - try to find the first unused crtc that can drive this connector,
+ * and use that if we find one
+ */
+
+ /* See if we already have a CRTC for this connector */
+ if (connector->state->crtc) {
+ crtc = to_intel_crtc(connector->state->crtc);
+
+ ret = drm_modeset_lock(&crtc->base.mutex, ctx);
+ if (ret)
+ goto fail;
+
+ /* Make sure the crtc and connector are running */
+ goto found;
+ }
+
+ /* Find an unused one (if possible) */
+ for_each_intel_crtc(dev, possible_crtc) {
+ if (!(encoder->base.possible_crtcs &
+ drm_crtc_mask(&possible_crtc->base)))
+ continue;
+
+ ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
+ if (ret)
+ goto fail;
+
+ if (possible_crtc->base.state->enable) {
+ drm_modeset_unlock(&possible_crtc->base.mutex);
+ continue;
+ }
+
+ crtc = possible_crtc;
+ break;
+ }
+
+ /*
+ * If we didn't find an unused CRTC, don't use any.
+ */
+ if (!crtc) {
+ drm_dbg_kms(&dev_priv->drm,
+ "no pipe available for load-detect\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+found:
+ state = drm_atomic_state_alloc(dev);
+ restore_state = drm_atomic_state_alloc(dev);
+ if (!state || !restore_state) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ state->acquire_ctx = ctx;
+ to_intel_atomic_state(state)->internal = true;
+
+ restore_state->acquire_ctx = ctx;
+ to_intel_atomic_state(restore_state)->internal = true;
+
+ connector_state = drm_atomic_get_connector_state(state, connector);
+ if (IS_ERR(connector_state)) {
+ ret = PTR_ERR(connector_state);
+ goto fail;
+ }
+
+ ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
+ if (ret)
+ goto fail;
+
+ crtc_state = intel_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+
+ crtc_state->uapi.active = true;
+
+ ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
+ &load_detect_mode);
+ if (ret)
+ goto fail;
+
+ ret = intel_modeset_disable_planes(state, &crtc->base);
+ if (ret)
+ goto fail;
+
+ ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
+ if (!ret)
+ ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
+ if (!ret)
+ ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to create a copy of old state to restore: %i\n",
+ ret);
+ goto fail;
+ }
+
+ ret = drm_atomic_commit(state);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to set mode on load-detect pipe\n");
+ goto fail;
+ }
+
+ drm_atomic_state_put(state);
+
+ /* let the connector get through one full cycle before testing */
+ intel_crtc_wait_for_next_vblank(crtc);
+
+ return restore_state;
+
+fail:
+ if (state) {
+ drm_atomic_state_put(state);
+ state = NULL;
+ }
+ if (restore_state) {
+ drm_atomic_state_put(restore_state);
+ restore_state = NULL;
+ }
+
+ if (ret == -EDEADLK)
+ return ERR_PTR(ret);
+
+ return NULL;
+}
+
+void intel_load_detect_release_pipe(struct drm_connector *connector,
+ struct drm_atomic_state *state,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct intel_encoder *intel_encoder =
+ intel_attached_encoder(to_intel_connector(connector));
+ struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
+ struct drm_encoder *encoder = &intel_encoder->base;
+ int ret;
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, connector->name,
+ encoder->base.id, encoder->name);
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
+ if (ret)
+ drm_dbg_kms(&i915->drm,
+ "Couldn't release load detect pipe: %i\n", ret);
+ drm_atomic_state_put(state);
+}
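
A sketch of the calling pattern these two entry points are designed for, per the return contract above (ERR_PTR(-EDEADLK) means back off and retry, NULL means no pipe could be borrowed); the probe step in the middle is a hypothetical placeholder for whatever hardware check a real caller performs:

	struct drm_atomic_state *state;
	enum drm_connector_status status = connector_status_unknown;

	state = intel_load_detect_get_pipe(connector, ctx);
	if (IS_ERR(state))
		return PTR_ERR(state);		/* -EDEADLK: caller retries */

	if (state)
		status = probe_connector_hw(connector);	/* hypothetical */

	intel_load_detect_release_pipe(connector, state, ctx);
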
diff --git a/drivers/gpu/drm/i915/display/intel_load_detect.h b/drivers/gpu/drm/i915/display/intel_load_detect.h
new file mode 100644
index 000000000000..aed51901b9ba
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_load_detect.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_LOAD_DETECT_H__
+#define __INTEL_LOAD_DETECT_H__
+
+struct drm_atomic_state;
+struct drm_connector;
+struct drm_modeset_acquire_ctx;
+
+struct drm_atomic_state *
+intel_load_detect_get_pipe(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx);
+void intel_load_detect_release_pipe(struct drm_connector *connector,
+ struct drm_atomic_state *old,
+ struct drm_modeset_acquire_ctx *ctx);
+
+#endif /* __INTEL_LOAD_DETECT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 0de44b3631cd..1f4349a12a02 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -150,7 +150,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
if (DISPLAY_VER(dev_priv) < 4) {
tmp = intel_de_read(dev_priv, PFIT_CONTROL);
- crtc_state->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+ crtc_state->gmch_pfit.control |= tmp & PFIT_PANEL_8TO6_DITHER_ENABLE;
}
crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock;
@@ -437,6 +437,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
crtc_state->pipe_bpp = lvds_bpp;
}
+ crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 134b943f1953..eefa4018dc0c 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -69,6 +69,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
}
state->acquire_ctx = ctx;
+ to_intel_atomic_state(state)->internal = true;
/* Everything's already locked, -EDEADLK can't happen. */
temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
@@ -559,7 +560,8 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
*/
crtc_state->inherited = true;
- intel_crtc_update_active_timings(crtc_state);
+ intel_crtc_update_active_timings(crtc_state,
+ crtc_state->vrr.enable);
intel_crtc_copy_hw_to_uapi_state(crtc_state);
}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index 842d70f0dfd2..5e0ec15d9fd5 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -11,6 +11,7 @@
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
+#include "intel_cx0_phy.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
@@ -236,6 +237,7 @@ void intel_modeset_verify_crtc(struct intel_crtc *crtc,
verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
intel_shared_dpll_state_verify(crtc, old_crtc_state, new_crtc_state);
intel_mpllb_state_verify(state, new_crtc_state);
+ intel_c10pll_state_verify(state, new_crtc_state);
}
void intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index c12bdca8da9b..d6fe2bbabe55 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -935,21 +935,25 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- u32 pfit_control = intel_de_read(dev_priv, PFIT_CONTROL);
u32 ratio;
/* XXX: This is not the same logic as in the xorg driver, but more in
* line with the intel documentation for the i965
*/
if (DISPLAY_VER(dev_priv) >= 4) {
+ u32 tmp = intel_de_read(dev_priv, PFIT_PGM_RATIOS);
+
/* on i965 use the PGM reg to read out the autoscaler values */
- ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+ ratio = REG_FIELD_GET(PFIT_VERT_SCALE_MASK_965, tmp);
} else {
- if (pfit_control & VERT_AUTO_SCALE)
- ratio = intel_de_read(dev_priv, PFIT_AUTO_RATIOS);
+ u32 tmp;
+
+ if (intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_VERT_AUTO_SCALE)
+ tmp = intel_de_read(dev_priv, PFIT_AUTO_RATIOS);
else
- ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS);
- ratio >>= PFIT_VERT_SCALE_SHIFT;
+ tmp = intel_de_read(dev_priv, PFIT_PGM_RATIOS);
+
+ ratio = REG_FIELD_GET(PFIT_VERT_SCALE_MASK, tmp);
}
overlay->pfit_vscale_ratio = ratio;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index ce2a34a25211..9232a305b1e6 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -42,6 +42,7 @@
#include "intel_lvds_regs.h"
#include "intel_panel.h"
#include "intel_quirks.h"
+#include "intel_vrr.h"
bool intel_panel_use_ssc(struct drm_i915_private *i915)
{
@@ -58,6 +59,38 @@ intel_panel_preferred_fixed_mode(struct intel_connector *connector)
struct drm_display_mode, head);
}
+static bool is_in_vrr_range(struct intel_connector *connector, int vrefresh)
+{
+ const struct drm_display_info *info = &connector->base.display_info;
+
+ return intel_vrr_is_capable(connector) &&
+ vrefresh >= info->monitor_range.min_vfreq &&
+ vrefresh <= info->monitor_range.max_vfreq;
+}
+
+static bool is_best_fixed_mode(struct intel_connector *connector,
+ int vrefresh, int fixed_mode_vrefresh,
+ const struct drm_display_mode *best_mode)
+{
+ /* we want to always return something */
+ if (!best_mode)
+ return true;
+
+ /*
+ * With VRR always pick a mode with equal/higher than requested
+ * vrefresh, which we can then reduce to match the requested
+ * vrefresh by extending the vblank length.
+ */
+ if (is_in_vrr_range(connector, vrefresh) &&
+ is_in_vrr_range(connector, fixed_mode_vrefresh) &&
+ fixed_mode_vrefresh < vrefresh)
+ return false;
+
+ /* pick the fixed_mode that is closest in terms of vrefresh */
+ return abs(fixed_mode_vrefresh - vrefresh) <
+ abs(drm_mode_vrefresh(best_mode) - vrefresh);
+}
+
const struct drm_display_mode *
intel_panel_fixed_mode(struct intel_connector *connector,
const struct drm_display_mode *mode)
@@ -65,11 +98,11 @@ intel_panel_fixed_mode(struct intel_connector *connector,
const struct drm_display_mode *fixed_mode, *best_mode = NULL;
int vrefresh = drm_mode_vrefresh(mode);
- /* pick the fixed_mode that is closest in terms of vrefresh */
list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) {
- if (!best_mode ||
- abs(drm_mode_vrefresh(fixed_mode) - vrefresh) <
- abs(drm_mode_vrefresh(best_mode) - vrefresh))
+ int fixed_mode_vrefresh = drm_mode_vrefresh(fixed_mode);
+
+ if (is_best_fixed_mode(connector, vrefresh,
+ fixed_mode_vrefresh, best_mode))
best_mode = fixed_mode;
}
@@ -178,27 +211,46 @@ int intel_panel_compute_config(struct intel_connector *connector,
{
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(connector, adjusted_mode);
+ int vrefresh, fixed_mode_vrefresh;
+ bool is_vrr;
if (!fixed_mode)
return 0;
+ vrefresh = drm_mode_vrefresh(adjusted_mode);
+ fixed_mode_vrefresh = drm_mode_vrefresh(fixed_mode);
+
/*
- * We don't want to lie too much to the user about the refresh
- * rate they're going to get. But we have to allow a bit of latitude
- * for Xorg since it likes to automagically cook up modes with slightly
- * off refresh rates.
+ * Assume that we shouldn't muck about with the
+ * timings if they don't land in the VRR range.
*/
- if (abs(drm_mode_vrefresh(adjusted_mode) - drm_mode_vrefresh(fixed_mode)) > 1) {
- drm_dbg_kms(connector->base.dev,
- "[CONNECTOR:%d:%s] Requested mode vrefresh (%d Hz) does not match fixed mode vrefresh (%d Hz)\n",
- connector->base.base.id, connector->base.name,
- drm_mode_vrefresh(adjusted_mode), drm_mode_vrefresh(fixed_mode));
+ is_vrr = is_in_vrr_range(connector, vrefresh) &&
+ is_in_vrr_range(connector, fixed_mode_vrefresh);
- return -EINVAL;
+ if (!is_vrr) {
+ /*
+ * We don't want to lie too much to the user about the refresh
+ * rate they're going to get. But we have to allow a bit of latitude
+ * for Xorg since it likes to automagically cook up modes with slightly
+ * off refresh rates.
+ */
+ if (abs(vrefresh - fixed_mode_vrefresh) > 1) {
+ drm_dbg_kms(connector->base.dev,
+ "[CONNECTOR:%d:%s] Requested mode vrefresh (%d Hz) does not match fixed mode vrefresh (%d Hz)\n",
+ connector->base.base.id, connector->base.name,
+ vrefresh, fixed_mode_vrefresh);
+
+ return -EINVAL;
+ }
}
drm_mode_copy(adjusted_mode, fixed_mode);
+ if (is_vrr && fixed_mode_vrefresh != vrefresh)
+ adjusted_mode->vtotal =
+ DIV_ROUND_CLOSEST(adjusted_mode->clock * 1000,
+ adjusted_mode->htotal * vrefresh);
+
drm_mode_set_crtcinfo(adjusted_mode, 0);
return 0;
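
Worked numbers for the vtotal stretch above (all values assumed for illustration, none from this diff): a 165 Hz fixed mode with clock = 775000 kHz and htotal = 2720, asked to run at 60 Hz:

	vtotal = DIV_ROUND_CLOSEST(775000 * 1000, 2720 * 60); /* = 4749 */

The panel keeps its native dotclock and the lower refresh rate comes purely from the longer vblank, which is exactly what the VRR range check earlier guarantees the sink can tolerate.
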
@@ -512,11 +564,11 @@ static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state,
bits = panel_fitter_scaling(pipe_src_h,
adjusted_mode->crtc_vdisplay);
- *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
- bits << PFIT_VERT_SCALE_SHIFT);
+ *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) |
+ PFIT_VERT_SCALE(bits));
*pfit_control |= (PFIT_ENABLE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
+ PFIT_VERT_INTERP_BILINEAR |
+ PFIT_HORIZ_INTERP_BILINEAR);
}
} else if (scaled_width < scaled_height) { /* letter */
centre_vertically(adjusted_mode,
@@ -527,18 +579,19 @@ static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state,
bits = panel_fitter_scaling(pipe_src_w,
adjusted_mode->crtc_hdisplay);
- *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
- bits << PFIT_VERT_SCALE_SHIFT);
+ *pfit_pgm_ratios |= (PFIT_HORIZ_SCALE(bits) |
+ PFIT_VERT_SCALE(bits));
*pfit_control |= (PFIT_ENABLE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
+ PFIT_VERT_INTERP_BILINEAR |
+ PFIT_HORIZ_INTERP_BILINEAR);
}
} else {
/* Aspects match, let hw scale both directions */
*pfit_control |= (PFIT_ENABLE |
- VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
+ PFIT_VERT_AUTO_SCALE |
+ PFIT_HORIZ_AUTO_SCALE |
+ PFIT_VERT_INTERP_BILINEAR |
+ PFIT_HORIZ_INTERP_BILINEAR);
}
}
@@ -586,10 +639,10 @@ static int gmch_panel_fitting(struct intel_crtc_state *crtc_state,
if (DISPLAY_VER(dev_priv) >= 4)
pfit_control |= PFIT_SCALING_AUTO;
else
- pfit_control |= (VERT_AUTO_SCALE |
- VERT_INTERP_BILINEAR |
- HORIZ_AUTO_SCALE |
- HORIZ_INTERP_BILINEAR);
+ pfit_control |= (PFIT_VERT_AUTO_SCALE |
+ PFIT_VERT_INTERP_BILINEAR |
+ PFIT_HORIZ_AUTO_SCALE |
+ PFIT_HORIZ_INTERP_BILINEAR);
}
break;
default:
@@ -610,7 +663,7 @@ out:
/* Make sure pre-965 set dither correctly for 18bpp panels. */
if (DISPLAY_VER(dev_priv) < 4 && crtc_state->pipe_bpp == 18)
- pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+ pfit_control |= PFIT_PANEL_8TO6_DITHER_ENABLE;
crtc_state->gmch_pfit.control = pfit_control;
crtc_state->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 8d3ea8d7b737..5a468ed6e26c 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -293,6 +293,7 @@ intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
}
state->acquire_ctx = &ctx;
+ to_intel_atomic_state(state)->internal = true;
retry:
pipe_config = intel_atomic_get_crtc_state(state, crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 7f9926672a6a..4f0b0cca03cc 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -1653,12 +1653,9 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
*/
pps_num = intel_num_pps(dev_priv);
- for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
- u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
-
- val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
- intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
- }
+ for (pps_idx = 0; pps_idx < pps_num; pps_idx++)
+ intel_de_rmw(dev_priv, PP_CONTROL(pps_idx),
+ PANEL_UNLOCK_MASK, PANEL_UNLOCK_REGS);
}
void intel_pps_setup(struct drm_i915_private *i915)
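
The intel_de_rmw() conversion above is shorthand for exactly the open-coded read-modify-write it deletes — assuming rmw's (reg, clear, set) semantics:

	u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
	val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
	intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
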
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 6badfff2b4a2..ea0389c5f656 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -85,6 +85,91 @@
* use page flips.
*/
+/*
+ * Description of PSR mask bits:
+ *
+ * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
+ *
+ * When unmasked (nearly) all display register writes (eg. even
+ * SWF) trigger a PSR exit. Some registers are excluded from this
+ * and they have a more specific mask (described below). On icl+
+ * this bit no longer exists and is effectively always set.
+ *
+ * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
+ *
+ * When unmasked (nearly) all pipe/plane register writes
+ * trigger a PSR exit. Some plane registers are excluded from this
+ * and they have a more specific mask (described below).
+ *
+ * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
+ * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
+ * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
+ *
+ * When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
+ * SPR_SURF/CURBASE are not included in this and instead are
+ * controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
+ * EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
+ *
+ * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
+ * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
+ *
+ * When unmasked PSR is blocked as long as the sprite
+ * plane is enabled. skl+ with their universal planes no
+ * longer have a mask bit like this, and no plane being
+ * enabled blocks PSR.
+ *
+ * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
+ * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
+ *
+ * When unmasked CURPOS writes trigger a PSR exit. On skl+
+ * this bit doesn't exist, but CURPOS is included in the
+ * PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
+ *
+ * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
+ * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
+ *
+ * When unmasked PSR is blocked as long as vblank and/or vsync
+ * interrupt is unmasked in IMR *and* enabled in IER.
+ *
+ * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
+ * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
+ *
+ * Selects whether PSR exit generates an extra vblank before
+ * the first frame is transmitted. Also note the opposite polarity
+ * of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
+ * unmasked==do not generate the extra vblank).
+ *
+ * With DC states enabled the extra vblank happens after link training,
+ * with DC states disabled it happens immediately upon PSR exit trigger.
+ * No idea as of now why there is a difference. HSW/BDW (which don't
+ * even have DMC) always generate it after link training. Go figure.
+ *
+ * Unfortunately CHICKEN_TRANS itself seems to be double buffered
+ * and thus won't latch until the first vblank. So with DC states
+ * enabled the register effectively uses the reset value during DC5
+ * exit+PSR exit sequence, and thus the bit does nothing until
+ * latched by the vblank that it was trying to prevent from being
+ * generated in the first place. So we should probably call this
+ * one a chicken/egg bit instead on skl+.
+ *
+ * In standby mode (as opposed to link-off) this makes no difference
+ * as the timing generator keeps running the whole time generating
+ * normal periodic vblanks.
+ *
+ * WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
+ * and doing so makes the behaviour match the skl+ reset value.
+ *
+ * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
+ * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
+ *
+ * On BDW without this bit no vblanks whatsoever are
+ * generated after PSR exit. On HSW this has no apparent effect.
+ * WaPsrDPRSUnmaskVBlankInSRD says to set this.
+ *
+ * The rest of the bits are more self-explanatory and/or
+ * irrelevant for normal operation.
+ */
+
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
struct intel_connector *connector = intel_dp->attached_connector;
@@ -208,13 +293,13 @@ static void psr_event_print(struct drm_i915_private *i915,
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
- enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
ktime_t time_ns = ktime_get();
i915_reg_t imr_reg;
if (DISPLAY_VER(dev_priv) >= 12)
- imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
+ imr_reg = TRANS_PSR_IMR(cpu_transcoder);
else
imr_reg = EDP_PSR_IMR;
@@ -232,13 +317,11 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
transcoder_name(cpu_transcoder));
if (DISPLAY_VER(dev_priv) >= 9) {
- u32 val = intel_de_read(dev_priv,
- PSR_EVENT(cpu_transcoder));
- bool psr2_enabled = intel_dp->psr.psr2_enabled;
+ u32 val;
- intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
- val);
- psr_event_print(dev_priv, val, psr2_enabled);
+ val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);
+
+ psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
}
}
@@ -419,7 +502,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
u32 val = 0;
if (DISPLAY_VER(dev_priv) >= 11)
- val |= EDP_PSR_TP4_TIME_0US;
+ val |= EDP_PSR_TP4_TIME_0us;
if (dev_priv->params.psr_safest_params) {
val |= EDP_PSR_TP1_TIME_2500us;
@@ -448,9 +531,9 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
check_tp3_sel:
if (intel_dp_source_supports_tps3(dev_priv) &&
drm_dp_tps3_supported(intel_dp->dpcd))
- val |= EDP_PSR_TP1_TP3_SEL;
+ val |= EDP_PSR_TP_TP1_TP3;
else
- val |= EDP_PSR_TP1_TP2_SEL;
+ val |= EDP_PSR_TP_TP1_TP2;
return val;
}
@@ -476,12 +559,13 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 max_sleep_time = 0x1f;
u32 val = EDP_PSR_ENABLE;
- val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
+ val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
- val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+ val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
if (IS_HASWELL(dev_priv))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
@@ -493,9 +577,8 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (DISPLAY_VER(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
- val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
- EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
- intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
+ intel_de_rmw(dev_priv, EDP_PSR_CTL(cpu_transcoder),
+ ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
}
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
@@ -534,9 +617,10 @@ static int psr2_block_count(struct intel_dp *intel_dp)
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val = EDP_PSR2_ENABLE;
- val |= psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
+ val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
val |= EDP_SU_TRACK_ENABLE;
@@ -570,15 +654,13 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
* comments below for more information
*/
- u32 tmp;
+ int tmp;
tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
- tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
- val |= tmp;
+ val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
- tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
- val |= tmp;
+ val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
} else if (DISPLAY_VER(dev_priv) >= 12) {
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
@@ -593,31 +675,30 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
if (intel_dp->psr.psr2_sel_fetch_enabled) {
u32 tmp;
- tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
+ tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
- intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
+ intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
}
/*
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
* recommends keeping this bit unset while PSR2 is enabled.
*/
- intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
+ intel_de_write(dev_priv, EDP_PSR_CTL(cpu_transcoder), 0);
- intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
}
static bool
-transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
+transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
{
if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
- return trans == TRANSCODER_A || trans == TRANSCODER_B;
+ return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
else if (DISPLAY_VER(dev_priv) >= 12)
- return trans == TRANSCODER_A;
+ return cpu_transcoder == TRANSCODER_A;
else
- return trans == TRANSCODER_EDP;
+ return cpu_transcoder == TRANSCODER_EDP;
}
static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
@@ -633,10 +714,11 @@ static void psr2_program_idle_frames(struct intel_dp *intel_dp,
u32 idle_frames)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
- intel_de_rmw(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder),
- EDP_PSR2_IDLE_FRAME_MASK, idle_frames);
+ intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
+ EDP_PSR2_IDLE_FRAMES_MASK,
+ EDP_PSR2_IDLE_FRAMES(idle_frames));
}
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
@@ -1074,6 +1156,7 @@ void intel_psr_get_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_dp *intel_dp;
u32 val;
@@ -1100,15 +1183,14 @@ void intel_psr_get_config(struct intel_encoder *encoder,
goto unlock;
if (HAS_PSR2_SEL_FETCH(dev_priv)) {
- val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
+ val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
if (val & PSR2_MAN_TRK_CTL_ENABLE)
pipe_config->enable_psr2_sel_fetch = true;
}
if (DISPLAY_VER(dev_priv) >= 12) {
- val = intel_de_read(dev_priv, TRANS_EXITLINE(intel_dp->psr.transcoder));
- val &= EXITLINE_MASK;
- pipe_config->dc3co_exitline = val;
+ val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
+ pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
}
unlock:
mutex_unlock(&intel_dp->psr.lock);
@@ -1117,14 +1199,14 @@ unlock:
static void intel_psr_activate(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum transcoder transcoder = intel_dp->psr.transcoder;
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- if (transcoder_has_psr2(dev_priv, transcoder))
+ if (transcoder_has_psr2(dev_priv, cpu_transcoder))
drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
+ intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
+ intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder)) & EDP_PSR_ENABLE);
drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
lockdep_assert_held(&intel_dp->psr.lock);
@@ -1203,7 +1285,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (DISPLAY_VER(dev_priv) < 11)
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
- intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
+ intel_de_write(dev_priv, EDP_PSR_DEBUG(cpu_transcoder),
mask);
psr_irq_control(intel_dp);
@@ -1259,6 +1341,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val;
/*
@@ -1270,8 +1353,7 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
* to avoid any rendering problems.
*/
if (DISPLAY_VER(dev_priv) >= 12)
- val = intel_de_read(dev_priv,
- TRANS_PSR_IIR(intel_dp->psr.transcoder));
+ val = intel_de_read(dev_priv, TRANS_PSR_IIR(cpu_transcoder));
else
val = intel_de_read(dev_priv, EDP_PSR_IIR);
val &= psr_irq_psr_error_bit_get(intel_dp);
@@ -1327,17 +1409,16 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
static void intel_psr_exit(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val;
if (!intel_dp->psr.active) {
- if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
- val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(intel_dp->psr.transcoder));
+ if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
+ val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
}
- val = intel_de_read(dev_priv,
- EDP_PSR_CTL(intel_dp->psr.transcoder));
+ val = intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder));
drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
return;
@@ -1345,19 +1426,16 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
if (intel_dp->psr.psr2_enabled) {
tgl_disallow_dc3co_on_psr2_exit(intel_dp);
- val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(intel_dp->psr.transcoder));
+
+ val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
+ EDP_PSR2_ENABLE, 0);
+
drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
- val &= ~EDP_PSR2_ENABLE;
- intel_de_write(dev_priv,
- EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
} else {
- val = intel_de_read(dev_priv,
- EDP_PSR_CTL(intel_dp->psr.transcoder));
+ val = intel_de_rmw(dev_priv, EDP_PSR_CTL(cpu_transcoder),
+ EDP_PSR_ENABLE, 0);
+
drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
- val &= ~EDP_PSR_ENABLE;
- intel_de_write(dev_priv,
- EDP_PSR_CTL(intel_dp->psr.transcoder), val);
}
intel_dp->psr.active = false;
}
@@ -1365,14 +1443,15 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
i915_reg_t psr_status;
u32 psr_status_mask;
if (intel_dp->psr.psr2_enabled) {
- psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
+ psr_status = EDP_PSR2_STATUS(cpu_transcoder);
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
+ psr_status = EDP_PSR_STATUS(cpu_transcoder);
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
}
@@ -1385,6 +1464,7 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
enum phy phy = intel_port_to_phy(dev_priv,
dp_to_dig_port(intel_dp)->base.port);
@@ -1411,7 +1491,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
intel_de_rmw(dev_priv,
- MTL_CLKGATE_DIS_TRANS(intel_dp->psr.transcoder),
+ MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
else if (IS_ALDERLAKE_P(dev_priv))
intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
@@ -1548,10 +1628,11 @@ static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
if (intel_dp->psr.psr2_sel_fetch_enabled)
intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
+ PSR2_MAN_TRK_CTL(cpu_transcoder),
man_trk_ctl_enable_bit_get(dev_priv) |
man_trk_ctl_partial_frame_bit_get(dev_priv) |
man_trk_ctl_single_full_frame_bit_get(dev_priv) |
@@ -1651,6 +1732,7 @@ void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct intel_encoder *encoder;
if (!crtc_state->enable_psr2_sel_fetch)
@@ -1666,7 +1748,7 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
break;
}
- intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
+ intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
crtc_state->psr2_man_track_ctl);
}
@@ -2045,6 +2127,7 @@ void intel_psr_post_plane_update(const struct intel_atomic_state *state)
static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
/*
* Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
@@ -2052,13 +2135,14 @@ static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
* EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
*/
return intel_de_wait_for_clear(dev_priv,
- EDP_PSR2_STATUS(intel_dp->psr.transcoder),
+ EDP_PSR2_STATUS(cpu_transcoder),
EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
}
static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
/*
* From bspec: Panel Self Refresh (BDW+)
@@ -2067,7 +2151,7 @@ static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
* defensive enough to cover everything.
*/
return intel_de_wait_for_clear(dev_priv,
- EDP_PSR_STATUS(intel_dp->psr.transcoder),
+ EDP_PSR_STATUS(cpu_transcoder),
EDP_PSR_STATUS_STATE_MASK, 50);
}
@@ -2109,6 +2193,7 @@ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_stat
static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
i915_reg_t reg;
u32 mask;
int err;
@@ -2117,10 +2202,10 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
return false;
if (intel_dp->psr.psr2_enabled) {
- reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
+ reg = EDP_PSR2_STATUS(cpu_transcoder);
mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
+ reg = EDP_PSR_STATUS(cpu_transcoder);
mask = EDP_PSR_STATUS_STATE_MASK;
}
@@ -2149,10 +2234,11 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
return -ENOMEM;
drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
state->acquire_ctx = &ctx;
+ to_intel_atomic_state(state)->internal = true;
retry:
-
drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
drm_for_each_connector_iter(conn, &conn_iter) {
struct drm_connector_state *conn_state;
@@ -2281,6 +2367,7 @@ unlock:
static void _psr_invalidate_handle(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
if (intel_dp->psr.psr2_sel_fetch_enabled) {
u32 val;
@@ -2294,7 +2381,7 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
val = man_trk_ctl_enable_bit_get(dev_priv) |
man_trk_ctl_partial_frame_bit_get(dev_priv) |
man_trk_ctl_continuos_full_frame(dev_priv);
- intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), val);
+ intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
} else {
@@ -2373,6 +2460,7 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
static void _psr_flush_handle(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
if (intel_dp->psr.psr2_sel_fetch_enabled) {
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
@@ -2389,7 +2477,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
* SU configuration in case update is sent for any reason after
* sff bit gets cleared by the HW on next vblank.
*/
- intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
+ intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
val);
intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
@@ -2702,6 +2790,7 @@ static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
const char *status = "unknown";
u32 val, status_val;
@@ -2719,8 +2808,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
"BUF_ON",
"TG_ON"
};
- val = intel_de_read(dev_priv,
- EDP_PSR2_STATUS(intel_dp->psr.transcoder));
+ val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
@@ -2735,10 +2823,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
"SRDOFFACK",
"SRDENT_ON",
};
- val = intel_de_read(dev_priv,
- EDP_PSR_STATUS(intel_dp->psr.transcoder));
- status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
- EDP_PSR_STATUS_STATE_SHIFT;
+ val = intel_de_read(dev_priv, EDP_PSR_STATUS(cpu_transcoder));
+ status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
}
@@ -2749,6 +2835,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct intel_psr *psr = &intel_dp->psr;
intel_wakeref_t wakeref;
const char *status;
@@ -2780,12 +2867,10 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
}
if (psr->psr2_enabled) {
- val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(intel_dp->psr.transcoder));
+ val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
- val = intel_de_read(dev_priv,
- EDP_PSR_CTL(intel_dp->psr.transcoder));
+ val = intel_de_read(dev_priv, EDP_PSR_CTL(cpu_transcoder));
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
@@ -2797,12 +2882,9 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
/*
* SKL+ Perf counter is reset to 0 every time DC state is entered
*/
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- val = intel_de_read(dev_priv,
- EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
- val &= EDP_PSR_PERF_CNT_MASK;
- seq_printf(m, "Performance counter: %u\n", val);
- }
+ val = intel_de_read(dev_priv, EDP_PSR_PERF_CNT(cpu_transcoder));
+ seq_printf(m, "Performance counter: %u\n",
+ REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
if (psr->debug & I915_PSR_DEBUG_IRQ) {
seq_printf(m, "Last attempted entry at: %lld\n",
@@ -2819,8 +2901,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
* frame boundary between register reads
*/
for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
- val = intel_de_read(dev_priv,
- PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
+ val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
su_frames_val[frame / 3] = val;
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index 958d8cabc44b..0f7db617425a 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -22,30 +22,36 @@
#define _SRD_CTL_A 0x60800
#define _SRD_CTL_EDP 0x6f800
#define EDP_PSR_CTL(tran) _MMIO_TRANS2(tran, _SRD_CTL_A)
-#define EDP_PSR_ENABLE (1 << 31)
-#define BDW_PSR_SINGLE_FRAME (1 << 30)
-#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */
-#define EDP_PSR_LINK_STANDBY (1 << 27)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3 << 25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0 << 25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1 << 25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2 << 25)
-#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3 << 25)
-#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20
-#define EDP_PSR_SKIP_AUX_EXIT (1 << 12)
-#define EDP_PSR_TP1_TP2_SEL (0 << 11)
-#define EDP_PSR_TP1_TP3_SEL (1 << 11)
-#define EDP_PSR_CRC_ENABLE (1 << 10) /* BDW+ */
-#define EDP_PSR_TP2_TP3_TIME_500us (0 << 8)
-#define EDP_PSR_TP2_TP3_TIME_100us (1 << 8)
-#define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8)
-#define EDP_PSR_TP2_TP3_TIME_0us (3 << 8)
-#define EDP_PSR_TP4_TIME_0US (3 << 6) /* ICL+ */
-#define EDP_PSR_TP1_TIME_500us (0 << 4)
-#define EDP_PSR_TP1_TIME_100us (1 << 4)
-#define EDP_PSR_TP1_TIME_2500us (2 << 4)
-#define EDP_PSR_TP1_TIME_0us (3 << 4)
-#define EDP_PSR_IDLE_FRAME_SHIFT 0
+#define EDP_PSR_ENABLE REG_BIT(31)
+#define BDW_PSR_SINGLE_FRAME REG_BIT(30)
+#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK REG_BIT(29) /* SW can't modify */
+#define EDP_PSR_LINK_STANDBY REG_BIT(27)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK REG_GENMASK(26, 25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES REG_FIELD_PREP(EDP_PSR_MIN_LINK_ENTRY_TIME_MASK, 0)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES REG_FIELD_PREP(EDP_PSR_MIN_LINK_ENTRY_TIME_MASK, 1)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES REG_FIELD_PREP(EDP_PSR_MIN_LINK_ENTRY_TIME_MASK, 2)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES REG_FIELD_PREP(EDP_PSR_MIN_LINK_ENTRY_TIME_MASK, 3)
+#define EDP_PSR_MAX_SLEEP_TIME_MASK REG_GENMASK(24, 20)
+#define EDP_PSR_MAX_SLEEP_TIME(x) REG_FIELD_PREP(EDP_PSR_MAX_SLEEP_TIME_MASK, (x))
+#define EDP_PSR_SKIP_AUX_EXIT REG_BIT(12)
+#define EDP_PSR_TP_MASK REG_BIT(11)
+#define EDP_PSR_TP_TP1_TP2 REG_FIELD_PREP(EDP_PSR_TP_MASK, 0)
+#define EDP_PSR_TP_TP1_TP3 REG_FIELD_PREP(EDP_PSR_TP_MASK, 1)
+#define EDP_PSR_CRC_ENABLE REG_BIT(10) /* BDW+ */
+#define EDP_PSR_TP2_TP3_TIME_MASK REG_GENMASK(9, 8)
+#define EDP_PSR_TP2_TP3_TIME_500us REG_FIELD_PREP(EDP_PSR_TP2_TP3_TIME_MASK, 0)
+#define EDP_PSR_TP2_TP3_TIME_100us REG_FIELD_PREP(EDP_PSR_TP2_TP3_TIME_MASK, 1)
+#define EDP_PSR_TP2_TP3_TIME_2500us REG_FIELD_PREP(EDP_PSR_TP2_TP3_TIME_MASK, 2)
+#define EDP_PSR_TP2_TP3_TIME_0us REG_FIELD_PREP(EDP_PSR_TP2_TP3_TIME_MASK, 3)
+#define EDP_PSR_TP4_TIME_MASK REG_GENMASK(7, 6)
+#define EDP_PSR_TP4_TIME_0us REG_FIELD_PREP(EDP_PSR_TP4_TIME_MASK, 3) /* ICL+ */
+#define EDP_PSR_TP1_TIME_MASK REG_GENMASK(5, 4)
+#define EDP_PSR_TP1_TIME_500us REG_FIELD_PREP(EDP_PSR_TP1_TIME_MASK, 0)
+#define EDP_PSR_TP1_TIME_100us REG_FIELD_PREP(EDP_PSR_TP1_TIME_MASK, 1)
+#define EDP_PSR_TP1_TIME_2500us REG_FIELD_PREP(EDP_PSR_TP1_TIME_MASK, 2)
+#define EDP_PSR_TP1_TIME_0us REG_FIELD_PREP(EDP_PSR_TP1_TIME_MASK, 3)
+#define EDP_PSR_IDLE_FRAMES_MASK REG_GENMASK(3, 0)
+#define EDP_PSR_IDLE_FRAMES(x) REG_FIELD_PREP(EDP_PSR_IDLE_FRAMES_MASK, (x))
/*
* Until TGL, IMR/IIR are fixed at 0x648xx. On TGL+ those registers are relative
@@ -80,81 +86,90 @@
#define _SRD_STATUS_A 0x60840
#define _SRD_STATUS_EDP 0x6f840
#define EDP_PSR_STATUS(tran) _MMIO_TRANS2(tran, _SRD_STATUS_A)
-#define EDP_PSR_STATUS_STATE_MASK (7 << 29)
-#define EDP_PSR_STATUS_STATE_SHIFT 29
-#define EDP_PSR_STATUS_STATE_IDLE (0 << 29)
-#define EDP_PSR_STATUS_STATE_SRDONACK (1 << 29)
-#define EDP_PSR_STATUS_STATE_SRDENT (2 << 29)
-#define EDP_PSR_STATUS_STATE_BUFOFF (3 << 29)
-#define EDP_PSR_STATUS_STATE_BUFON (4 << 29)
-#define EDP_PSR_STATUS_STATE_AUXACK (5 << 29)
-#define EDP_PSR_STATUS_STATE_SRDOFFACK (6 << 29)
-#define EDP_PSR_STATUS_LINK_MASK (3 << 26)
-#define EDP_PSR_STATUS_LINK_FULL_OFF (0 << 26)
-#define EDP_PSR_STATUS_LINK_FULL_ON (1 << 26)
-#define EDP_PSR_STATUS_LINK_STANDBY (2 << 26)
-#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
-#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f
-#define EDP_PSR_STATUS_COUNT_SHIFT 16
-#define EDP_PSR_STATUS_COUNT_MASK 0xf
-#define EDP_PSR_STATUS_AUX_ERROR (1 << 15)
-#define EDP_PSR_STATUS_AUX_SENDING (1 << 12)
-#define EDP_PSR_STATUS_SENDING_IDLE (1 << 9)
-#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1 << 8)
-#define EDP_PSR_STATUS_SENDING_TP1 (1 << 4)
-#define EDP_PSR_STATUS_IDLE_MASK 0xf
+#define EDP_PSR_STATUS_STATE_MASK REG_GENMASK(31, 29)
+#define EDP_PSR_STATUS_STATE_IDLE REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 0)
+#define EDP_PSR_STATUS_STATE_SRDONACK REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 1)
+#define EDP_PSR_STATUS_STATE_SRDENT REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 2)
+#define EDP_PSR_STATUS_STATE_BUFOFF REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 3)
+#define EDP_PSR_STATUS_STATE_BUFON REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 4)
+#define EDP_PSR_STATUS_STATE_AUXACK REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 5)
+#define EDP_PSR_STATUS_STATE_SRDOFFACK REG_FIELD_PREP(EDP_PSR_STATUS_STATE_MASK, 6)
+#define EDP_PSR_STATUS_LINK_MASK REG_GENMASK(27, 26)
+#define EDP_PSR_STATUS_LINK_FULL_OFF REG_FIELD_PREP(EDP_PSR_STATUS_LINK_MASK, 0)
+#define EDP_PSR_STATUS_LINK_FULL_ON REG_FIELD_PREP(EDP_PSR_STATUS_LINK_MASK, 1)
+#define EDP_PSR_STATUS_LINK_STANDBY REG_FIELD_PREP(EDP_PSR_STATUS_LINK_MASK, 2)
+#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK REG_GENMASK(24, 20)
+#define EDP_PSR_STATUS_COUNT_MASK REG_GENMASK(19, 16)
+#define EDP_PSR_STATUS_AUX_ERROR REG_BIT(15)
+#define EDP_PSR_STATUS_AUX_SENDING REG_BIT(12)
+#define EDP_PSR_STATUS_SENDING_IDLE REG_BIT(9)
+#define EDP_PSR_STATUS_SENDING_TP2_TP3 REG_BIT(8)
+#define EDP_PSR_STATUS_SENDING_TP1 REG_BIT(4)
+#define EDP_PSR_STATUS_IDLE_MASK REG_GENMASK(3, 0)
#define _SRD_PERF_CNT_A 0x60844
#define _SRD_PERF_CNT_EDP 0x6f844
#define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(tran, _SRD_PERF_CNT_A)
-#define EDP_PSR_PERF_CNT_MASK 0xffffff
+#define EDP_PSR_PERF_CNT_MASK REG_GENMASK(23, 0)
/* PSR_MASK on SKL+ */
#define _SRD_DEBUG_A 0x60860
#define _SRD_DEBUG_EDP 0x6f860
#define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(tran, _SRD_DEBUG_A)
-#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28)
-#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
-#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
-#define EDP_PSR_DEBUG_MASK_HPD (1 << 25)
-#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */
-#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
+#define EDP_PSR_DEBUG_MASK_MAX_SLEEP REG_BIT(28)
+#define EDP_PSR_DEBUG_MASK_LPSP REG_BIT(27)
+#define EDP_PSR_DEBUG_MASK_MEMUP REG_BIT(26)
+#define EDP_PSR_DEBUG_MASK_HPD REG_BIT(25)
+#define EDP_PSR_DEBUG_MASK_FBC_MODIFY REG_BIT(24)
+#define EDP_PSR_DEBUG_MASK_PRIMARY_FLIP REG_BIT(23) /* hsw */
+#define EDP_PSR_DEBUG_MASK_HDCP_ENABLE REG_BIT(22) /* hsw/bdw */
+#define EDP_PSR_DEBUG_MASK_SPRITE_ENABLE REG_BIT(21) /* hsw */
+#define EDP_PSR_DEBUG_MASK_CURSOR_MOVE REG_BIT(20) /* hsw */
+#define EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT REG_BIT(19) /* hsw */
+#define EDP_PSR_DEBUG_MASK_DPST_PHASE_IN REG_BIT(18) /* hsw */
+#define EDP_PSR_DEBUG_MASK_KVMR_SESSION_EN REG_BIT(17)
+#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE REG_BIT(16) /* hsw-skl */
+#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN REG_BIT(15) /* skl+ */
+#define EDP_PSR_DEBUG_RFB_UPDATE_SENT REG_BIT(2) /* bdw */
+#define EDP_PSR_DEBUG_ENTRY_COMPLETION REG_BIT(1) /* hsw/bdw */
#define _PSR2_CTL_A 0x60900
#define _PSR2_CTL_EDP 0x6f900
#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A)
-#define EDP_PSR2_ENABLE (1 << 31)
-#define EDP_SU_TRACK_ENABLE (1 << 30) /* up to adl-p */
-#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 (0 << 28)
-#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 (1 << 28)
+#define EDP_PSR2_ENABLE REG_BIT(31)
+#define EDP_SU_TRACK_ENABLE REG_BIT(30) /* up to adl-p */
+#define TGL_EDP_PSR2_BLOCK_COUNT_MASK REG_BIT(28)
+#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 REG_FIELD_PREP(TGL_EDP_PSR2_BLOCK_COUNT_MASK, 0)
+#define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 REG_FIELD_PREP(TGL_EDP_PSR2_BLOCK_COUNT_MASK, 1)
#define EDP_Y_COORDINATE_ENABLE REG_BIT(25) /* display 10, 11 and 12 */
#define EDP_PSR2_SU_SDP_SCANLINE REG_BIT(25) /* display 13+ */
-#define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20)
-#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20)
+#define EDP_MAX_SU_DISABLE_TIME_MASK REG_GENMASK(24, 20)
+#define   EDP_MAX_SU_DISABLE_TIME(t)		REG_FIELD_PREP(EDP_MAX_SU_DISABLE_TIME_MASK, (t))
+#define EDP_PSR2_IO_BUFFER_WAKE_MASK REG_GENMASK(14, 13)
#define EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES 8
-#define EDP_PSR2_IO_BUFFER_WAKE(lines) ((EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES - (lines)) << 13)
-#define EDP_PSR2_IO_BUFFER_WAKE_MASK (3 << 13)
+#define EDP_PSR2_IO_BUFFER_WAKE(lines) REG_FIELD_PREP(EDP_PSR2_IO_BUFFER_WAKE_MASK, \
+ EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES - (lines))
+#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK REG_GENMASK(15, 13)
#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES 5
-#define TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT 13
-#define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) (((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT)
-#define TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK (7 << 13)
+#define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) REG_FIELD_PREP(TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK, \
+ (lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES)
+#define EDP_PSR2_FAST_WAKE_MASK REG_GENMASK(12, 11)
#define EDP_PSR2_FAST_WAKE_MAX_LINES 8
-#define EDP_PSR2_FAST_WAKE(lines) ((EDP_PSR2_FAST_WAKE_MAX_LINES - (lines)) << 11)
-#define EDP_PSR2_FAST_WAKE_MASK (3 << 11)
+#define EDP_PSR2_FAST_WAKE(lines) REG_FIELD_PREP(EDP_PSR2_FAST_WAKE_MASK, \
+ EDP_PSR2_FAST_WAKE_MAX_LINES - (lines))
+#define TGL_EDP_PSR2_FAST_WAKE_MASK REG_GENMASK(12, 10)
#define TGL_EDP_PSR2_FAST_WAKE_MIN_LINES 5
-#define TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT 10
-#define TGL_EDP_PSR2_FAST_WAKE(lines) (((lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT)
-#define TGL_EDP_PSR2_FAST_WAKE_MASK (7 << 10)
-#define EDP_PSR2_TP2_TIME_500us (0 << 8)
-#define EDP_PSR2_TP2_TIME_100us (1 << 8)
-#define EDP_PSR2_TP2_TIME_2500us (2 << 8)
-#define EDP_PSR2_TP2_TIME_50us (3 << 8)
-#define EDP_PSR2_TP2_TIME_MASK (3 << 8)
-#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
-#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf << 4)
-#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a) << 4)
-#define EDP_PSR2_IDLE_FRAME_MASK 0xf
-#define EDP_PSR2_IDLE_FRAME_SHIFT 0
+#define TGL_EDP_PSR2_FAST_WAKE(lines) REG_FIELD_PREP(TGL_EDP_PSR2_FAST_WAKE_MASK, \
+ (lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES)
+#define EDP_PSR2_TP2_TIME_MASK REG_GENMASK(9, 8)
+#define EDP_PSR2_TP2_TIME_500us REG_FIELD_PREP(EDP_PSR2_TP2_TIME_MASK, 0)
+#define EDP_PSR2_TP2_TIME_100us REG_FIELD_PREP(EDP_PSR2_TP2_TIME_MASK, 1)
+#define EDP_PSR2_TP2_TIME_2500us REG_FIELD_PREP(EDP_PSR2_TP2_TIME_MASK, 2)
+#define EDP_PSR2_TP2_TIME_50us REG_FIELD_PREP(EDP_PSR2_TP2_TIME_MASK, 3)
+#define EDP_PSR2_FRAME_BEFORE_SU_MASK REG_GENMASK(7, 4)
+#define EDP_PSR2_FRAME_BEFORE_SU(a) REG_FIELD_PREP(EDP_PSR2_FRAME_BEFORE_SU_MASK, (a))
+#define EDP_PSR2_IDLE_FRAMES_MASK REG_GENMASK(3, 0)
+#define EDP_PSR2_IDLE_FRAMES(x) REG_FIELD_PREP(EDP_PSR2_IDLE_FRAMES_MASK, (x))
#define _PSR_EVENT_TRANS_A 0x60848
#define _PSR_EVENT_TRANS_B 0x61848
@@ -162,22 +177,22 @@
#define _PSR_EVENT_TRANS_D 0x63848
#define _PSR_EVENT_TRANS_EDP 0x6f848
#define PSR_EVENT(tran) _MMIO_TRANS2(tran, _PSR_EVENT_TRANS_A)
-#define PSR_EVENT_PSR2_WD_TIMER_EXPIRE (1 << 17)
-#define PSR_EVENT_PSR2_DISABLED (1 << 16)
-#define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN (1 << 15)
-#define PSR_EVENT_SU_CRC_FIFO_UNDERRUN (1 << 14)
-#define PSR_EVENT_GRAPHICS_RESET (1 << 12)
-#define PSR_EVENT_PCH_INTERRUPT (1 << 11)
-#define PSR_EVENT_MEMORY_UP (1 << 10)
-#define PSR_EVENT_FRONT_BUFFER_MODIFY (1 << 9)
-#define PSR_EVENT_WD_TIMER_EXPIRE (1 << 8)
-#define PSR_EVENT_PIPE_REGISTERS_UPDATE (1 << 6)
-#define PSR_EVENT_REGISTER_UPDATE (1 << 5) /* Reserved in ICL+ */
-#define PSR_EVENT_HDCP_ENABLE (1 << 4)
-#define PSR_EVENT_KVMR_SESSION_ENABLE (1 << 3)
-#define PSR_EVENT_VBI_ENABLE (1 << 2)
-#define PSR_EVENT_LPSP_MODE_EXIT (1 << 1)
-#define PSR_EVENT_PSR_DISABLE (1 << 0)
+#define PSR_EVENT_PSR2_WD_TIMER_EXPIRE REG_BIT(17)
+#define PSR_EVENT_PSR2_DISABLED REG_BIT(16)
+#define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN REG_BIT(15)
+#define PSR_EVENT_SU_CRC_FIFO_UNDERRUN REG_BIT(14)
+#define PSR_EVENT_GRAPHICS_RESET REG_BIT(12)
+#define PSR_EVENT_PCH_INTERRUPT REG_BIT(11)
+#define PSR_EVENT_MEMORY_UP REG_BIT(10)
+#define PSR_EVENT_FRONT_BUFFER_MODIFY REG_BIT(9)
+#define PSR_EVENT_WD_TIMER_EXPIRE REG_BIT(8)
+#define PSR_EVENT_PIPE_REGISTERS_UPDATE REG_BIT(6)
+#define PSR_EVENT_REGISTER_UPDATE REG_BIT(5) /* Reserved in ICL+ */
+#define PSR_EVENT_HDCP_ENABLE REG_BIT(4)
+#define PSR_EVENT_KVMR_SESSION_ENABLE REG_BIT(3)
+#define PSR_EVENT_VBI_ENABLE REG_BIT(2)
+#define PSR_EVENT_LPSP_MODE_EXIT REG_BIT(1)
+#define PSR_EVENT_PSR_DISABLE REG_BIT(0)
#define _PSR2_STATUS_A 0x60940
#define _PSR2_STATUS_EDP 0x6f940
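(Aside on the conversion above: REG_BIT()/REG_GENMASK()/REG_FIELD_PREP()
replace the open-coded shifts with self-describing field definitions. A
minimal sketch of their semantics; the real i915_reg_defs.h versions add
compile-time mask sanity checks on top of this:

	/* e.g. SKETCH_GENMASK(31, 29) == (7 << 29), the old STATE_MASK */
	#define SKETCH_GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
	/* shift a value into the field described by the mask */
	#define SKETCH_FIELD_PREP(m, v)	(((v) << __builtin_ctz(m)) & (m))
	/* extract the field back out of a register value */
	#define SKETCH_FIELD_GET(m, r)	(((r) & (m)) >> __builtin_ctz(m))

so REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val) in the intel_psr.c hunk above
is equivalent to the removed "val &= EDP_PSR_PERF_CNT_MASK", the mask now
being spelled as bits 23:0.)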
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index e12ba458636c..34ee9dd82a78 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1351,6 +1351,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
pipe_config->pipe_bpp = 8*3;
+ pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 1cfb94b5cedb..a72677bf617b 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -2007,6 +2007,11 @@ void intel_mpllb_state_verify(struct intel_atomic_state *state,
if (!new_crtc_state->hw.active)
return;
+ /* intel_get_crtc_new_encoder() only works for modeset/fastset commits */
+ if (!intel_crtc_needs_modeset(new_crtc_state) &&
+ !intel_crtc_needs_fastset(new_crtc_state))
+ return;
+
encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h
index 4635c7ad23f9..91c6dca342b2 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.h
+++ b/drivers/gpu/drm/i915/display/intel_sprite.h
@@ -16,16 +16,6 @@ struct intel_crtc_state;
struct intel_plane_state;
enum pipe;
-/*
- * FIXME: We should instead only take spinlocks once for the entire update
- * instead of once per mmio.
- */
-#if IS_ENABLED(CONFIG_PROVE_LOCKING)
-#define VBLANK_EVASION_TIME_US 250
-#else
-#define VBLANK_EVASION_TIME_US 100
-#endif
-
struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane);
int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
index 70a391083751..a76b48ebc2d3 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite_uapi.c
@@ -86,6 +86,7 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
goto out;
}
state->acquire_ctx = &ctx;
+ to_intel_atomic_state(state)->internal = true;
while (1) {
plane_state = drm_atomic_get_plane_state(state, plane);
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 3b60995e9dfb..4fca711a58bc 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display.h"
@@ -15,6 +16,10 @@
#include "intel_mg_phy_regs.h"
#include "intel_tc.h"
+#define DP_PIN_ASSIGNMENT_C 0x3
+#define DP_PIN_ASSIGNMENT_D 0x4
+#define DP_PIN_ASSIGNMENT_E 0x5
+
enum tc_port_mode {
TC_PORT_DISCONNECTED,
TC_PORT_TBT_ALT,
@@ -59,6 +64,7 @@ static enum intel_display_power_domain
tc_phy_cold_off_domain(struct intel_tc_port *);
static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
static bool tc_phy_is_ready(struct intel_tc_port *tc);
+static bool tc_phy_wait_for_ready(struct intel_tc_port *tc);
static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);
static const char *tc_port_mode_name(enum tc_port_mode mode)
@@ -141,15 +147,23 @@ bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
*
* POWER_DOMAIN_TC_COLD_OFF:
* -------------------------
- * TGL/legacy, DP-alt modes:
+ * ICL/DP-alt, TBT mode:
+ * - TCSS/TBT: block TC-cold power state for using the (direct or
+ * TBT DP-IN) AUX and main lanes.
+ *
+ * TGL/all modes:
* - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
- * - TCSS/PHY: block TC-cold power state for using the PHY AUX and
- * main lanes.
+ * - TCSS/PHY: block TC-cold power state for using the (direct or
+ * TBT DP-IN) AUX and main lanes.
*
- * ICL, TGL, ADLP/TBT mode:
- * - TCSS/IOM,FIA access for HPD live state
+ * ADLP/TBT mode:
* - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
* AUX and main lanes.
+ *
+ * XELPDP+/all modes:
+ * - TCSS/IOM,FIA access for PHY ready, owned state
+ * - TCSS/PHY: block TC-cold power state for using the (direct or
+ * TBT DP-IN) AUX and main lanes.
*/
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
{
@@ -271,6 +285,27 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
+static int mtl_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ intel_wakeref_t wakeref;
+ u32 pin_mask;
+
+ with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port);
+
+ switch (pin_mask) {
+ default:
+ MISSING_CASE(pin_mask);
+ fallthrough;
+ case DP_PIN_ASSIGNMENT_D:
+ return 2;
+ case DP_PIN_ASSIGNMENT_C:
+ case DP_PIN_ASSIGNMENT_E:
+ return 4;
+ }
+}
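(The lane counts above follow the USB Type-C DP alt mode pin assignments;
the 0x3/0x4/0x5 encodings are the local defines added at the top of this
file. A reference sketch based on the VESA DP alt mode spec, not on
anything else in this patch:

	/*
	 * Pin assignment C: 4 lanes of DP (DP-only configuration)
	 * Pin assignment D: 2 lanes of DP + 2 lanes of USB3
	 * Pin assignment E: 4 lanes of DP (DP-only configuration)
	 */

hence C/E return 4 lanes and D returns 2.)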
+
int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -284,6 +319,9 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
assert_tc_cold_blocked(tc);
+ if (DISPLAY_VER(i915) >= 14)
+ return mtl_tc_port_get_pin_assignment_mask(dig_port);
+
lane_mask = 0;
with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
lane_mask = intel_tc_port_get_lane_mask(dig_port);
@@ -873,6 +911,200 @@ static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
};
/*
+ * XELPDP TC PHY handlers
+ * ----------------------
+ */
+static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ struct intel_digital_port *dig_port = tc->dig_port;
+ enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
+ u32 pica_isr_bits = i915->display.hotplug.hpd[hpd_pin];
+ u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
+ intel_wakeref_t wakeref;
+ u32 pica_isr;
+ u32 pch_isr;
+ u32 mask = 0;
+
+ with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
+ pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR);
+ pch_isr = intel_de_read(i915, SDEISR);
+ }
+
+ if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
+ mask |= BIT(TC_PORT_DP_ALT);
+ if (pica_isr & (pica_isr_bits & XELPDP_TBT_HOTPLUG_MASK))
+ mask |= BIT(TC_PORT_TBT_ALT);
+
+ if (tc->legacy_port && (pch_isr & pch_isr_bit))
+ mask |= BIT(TC_PORT_LEGACY);
+
+ return mask;
+}
+
+static bool
+xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ enum port port = tc->dig_port->base.port;
+
+ assert_tc_cold_blocked(tc);
+
+ return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TCSS_POWER_STATE;
+}
+
+static bool
+xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+
+ if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
+		drm_dbg_kms(&i915->drm,
+			    "Port %s: timeout waiting for TCSS power to get %s\n",
+			    tc->port_name,
+			    enabled ? "enabled" : "disabled");
+ return false;
+ }
+
+ return true;
+}
+
+static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ enum port port = tc->dig_port->base.port;
+ u32 val;
+
+ assert_tc_cold_blocked(tc);
+
+ val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
+ if (enable)
+ val |= XELPDP_TCSS_POWER_REQUEST;
+ else
+ val &= ~XELPDP_TCSS_POWER_REQUEST;
+ intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
+}
+
+static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+
+ __xelpdp_tc_phy_enable_tcss_power(tc, enable);
+
+ if ((!tc_phy_wait_for_ready(tc) ||
+ !xelpdp_tc_phy_wait_for_tcss_power(tc, enable)) &&
+ !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
+ if (enable) {
+ __xelpdp_tc_phy_enable_tcss_power(tc, false);
+ xelpdp_tc_phy_wait_for_tcss_power(tc, false);
+ }
+
+ return false;
+ }
+
+ return true;
+}
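(Descriptive note on the enable path above, using only names that appear
in the function itself:

	/*
	 * Request/ack pattern: set XELPDP_TCSS_POWER_REQUEST, then poll
	 * XELPDP_TCSS_POWER_STATE for the ack. On timeout the request is
	 * rolled back so the PHY is not left half-enabled; a timeout on a
	 * legacy port is unexpected, so it only warns and is treated as
	 * success without a rollback.
	 */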
+
+static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ enum port port = tc->dig_port->base.port;
+ u32 val;
+
+ assert_tc_cold_blocked(tc);
+
+ val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
+ if (take)
+ val |= XELPDP_TC_PHY_OWNERSHIP;
+ else
+ val &= ~XELPDP_TC_PHY_OWNERSHIP;
+ intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
+}
+
+static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ enum port port = tc->dig_port->base.port;
+
+ assert_tc_cold_blocked(tc);
+
+ return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TC_PHY_OWNERSHIP;
+}
+
+static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
+{
+ struct drm_i915_private *i915 = tc_to_i915(tc);
+ intel_wakeref_t tc_cold_wref;
+ enum intel_display_power_domain domain;
+
+ tc_cold_wref = __tc_cold_block(tc, &domain);
+
+ tc->mode = tc_phy_get_current_mode(tc);
+ if (tc->mode != TC_PORT_DISCONNECTED)
+ tc->lock_wakeref = tc_cold_block(tc);
+
+ drm_WARN_ON(&i915->drm,
+ (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
+ !xelpdp_tc_phy_tcss_power_is_enabled(tc));
+
+ __tc_cold_unblock(tc, domain, tc_cold_wref);
+}
+
+static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
+{
+ tc->lock_wakeref = tc_cold_block(tc);
+
+ if (tc->mode == TC_PORT_TBT_ALT)
+ return true;
+
+ if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
+ goto out_unblock_tccold;
+
+ xelpdp_tc_phy_take_ownership(tc, true);
+
+ if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
+ goto out_release_phy;
+
+ return true;
+
+out_release_phy:
+ xelpdp_tc_phy_take_ownership(tc, false);
+ xelpdp_tc_phy_wait_for_tcss_power(tc, false);
+
+out_unblock_tccold:
+ tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
+
+ return false;
+}
+
+static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc)
+{
+ switch (tc->mode) {
+ case TC_PORT_LEGACY:
+ case TC_PORT_DP_ALT:
+ xelpdp_tc_phy_take_ownership(tc, false);
+ xelpdp_tc_phy_enable_tcss_power(tc, false);
+ fallthrough;
+ case TC_PORT_TBT_ALT:
+ tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
+ break;
+ default:
+ MISSING_CASE(tc->mode);
+ }
+}
+
+static const struct intel_tc_phy_ops xelpdp_tc_phy_ops = {
+ .cold_off_domain = tgl_tc_phy_cold_off_domain,
+ .hpd_live_status = xelpdp_tc_phy_hpd_live_status,
+ .is_ready = adlp_tc_phy_is_ready,
+ .is_owned = xelpdp_tc_phy_is_owned,
+ .get_hw_state = xelpdp_tc_phy_get_hw_state,
+ .connect = xelpdp_tc_phy_connect,
+ .disconnect = xelpdp_tc_phy_disconnect,
+ .init = adlp_tc_phy_init,
+};
+
+/*
* Generic TC PHY handlers
* -----------------------
*/
@@ -945,13 +1177,18 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
return is_connected;
}
-static void tc_phy_wait_for_ready(struct intel_tc_port *tc)
+static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
{
struct drm_i915_private *i915 = tc_to_i915(tc);
- if (wait_for(tc_phy_is_ready(tc), 100))
+	if (wait_for(tc_phy_is_ready(tc), 500)) {
drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
tc->port_name);
+
+ return false;
+ }
+
+ return true;
}
static enum tc_port_mode
@@ -1442,7 +1679,9 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
dig_port->tc = tc;
tc->dig_port = dig_port;
- if (DISPLAY_VER(i915) >= 13)
+ if (DISPLAY_VER(i915) >= 14)
+ tc->phy_ops = &xelpdp_tc_phy_ops;
+ else if (DISPLAY_VER(i915) >= 13)
tc->phy_ops = &adlp_tc_phy_ops;
else if (DISPLAY_VER(i915) >= 12)
tc->phy_ops = &tgl_tc_phy_ops;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 557ec5b62afa..96fe4a280077 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -43,6 +43,7 @@
#include "intel_display_types.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
+#include "intel_load_detect.h"
#include "intel_tv.h"
#include "intel_tv_regs.h"
@@ -1205,6 +1206,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
+ pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
drm_dbg_kms(&dev_priv->drm, "forcing bpc to 8 for TV\n");
@@ -1722,21 +1724,21 @@ intel_tv_detect(struct drm_connector *connector,
return connector_status_disconnected;
if (force) {
- struct intel_load_detect_pipe tmp;
- int ret;
+ struct drm_atomic_state *state;
- ret = intel_get_load_detect_pipe(connector, &tmp, ctx);
- if (ret < 0)
- return ret;
+ state = intel_load_detect_get_pipe(connector, ctx);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
- if (ret > 0) {
+ if (state) {
type = intel_tv_detect_type(intel_tv, connector);
- intel_release_load_detect_pipe(connector, &tmp, ctx);
+ intel_load_detect_release_pipe(connector, state, ctx);
status = type < 0 ?
connector_status_disconnected :
connector_status_connected;
- } else
+ } else {
status = connector_status_unknown;
+ }
if (status == connector_status_connected) {
intel_tv->type = type;
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index f8bf9810527d..f5659ebd08eb 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -340,8 +340,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
* matches how the scanline counter based position works since
* the scanline counter doesn't count the two half lines.
*/
- if (position >= vtotal)
- position = vtotal - 1;
+	position = min(position, vtotal - 1);
/*
* Start of vblank interrupt is triggered at start of hsync,
@@ -488,21 +487,27 @@ static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state)
}
}
-void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
+void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
+ bool vrr_enable)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ u8 mode_flags = crtc_state->mode_flags;
struct drm_display_mode adjusted_mode;
int vmax_vblank_start = 0;
unsigned long irqflags;
drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);
- if (crtc_state->vrr.enable) {
+ if (vrr_enable) {
+ drm_WARN_ON(&i915->drm, (mode_flags & I915_MODE_FLAG_VRR) == 0);
+
adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
+ } else {
+ mode_flags &= ~I915_MODE_FLAG_VRR;
}
/*
@@ -524,7 +529,7 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
crtc->vmax_vblank_start = vmax_vblank_start;
- crtc->mode_flags = crtc_state->mode_flags;
+ crtc->mode_flags = mode_flags;
crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h
index 0884db7e76ae..08e706b29149 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.h
+++ b/drivers/gpu/drm/i915/display/intel_vblank.h
@@ -20,6 +20,7 @@ bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc);
void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc);
-void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state);
+void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
+ bool vrr_enable);
#endif /* __INTEL_VBLANK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 4228f26b4c11..88e4759b538b 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -114,9 +114,6 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return;
- if (!crtc_state->uapi.vrr_enabled)
- return;
-
vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000,
adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq);
vmax = adjusted_mode->crtc_clock * 1000 /
@@ -135,7 +132,6 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
*/
crtc_state->vrr.vmin = vmin - 1;
crtc_state->vrr.vmax = vmax;
- crtc_state->vrr.enable = true;
crtc_state->vrr.flipline = crtc_state->vrr.vmin + 1;
@@ -152,7 +148,10 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
crtc_state->framestart_delay - 1);
}
- crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+ if (crtc_state->uapi.vrr_enabled) {
+ crtc_state->vrr.enable = true;
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+ }
}
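(A worked example of the vmin/vmax math above, with hypothetical panel
numbers rather than values from any real EDID: for crtc_clock = 241500 kHz,
crtc_htotal = 2080 and a 48-144 Hz monitor range,

	vmin = DIV_ROUND_UP(241500 * 1000, 2080 * 144);	/* == 807 */
	vmax = 241500 * 1000 / (2080 * 48);		/* == 2418 */

so the transcoder may end a frame anywhere between 807 and 2418 total
lines, sweeping the refresh rate across the panel's range; as the code
above shows, the crtc_state stores vmin - 1 and vmax.)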
static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
@@ -168,23 +167,28 @@ static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
VRR_CTL_PIPELINE_FULL_OVERRIDE;
}
-void intel_vrr_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (!crtc_state->vrr.enable)
+ /*
+ * TRANS_SET_CONTEXT_LATENCY with VRR enabled
+ * requires this chicken bit on ADL/DG2.
+ */
+ if (DISPLAY_VER(dev_priv) == 13)
+ intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
+ 0, PIPE_VBLANK_WITH_DELAY);
+
+ if (!crtc_state->vrr.flipline) {
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), 0);
return;
+ }
intel_de_write(dev_priv, TRANS_VRR_VMIN(cpu_transcoder), crtc_state->vrr.vmin - 1);
intel_de_write(dev_priv, TRANS_VRR_VMAX(cpu_transcoder), crtc_state->vrr.vmax - 1);
intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), trans_vrr_ctl(crtc_state));
intel_de_write(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder), crtc_state->vrr.flipline - 1);
- intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN);
-
- intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder),
- VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
}
void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
@@ -212,6 +216,19 @@ bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state)
return intel_de_read(dev_priv, TRANS_PUSH(cpu_transcoder)) & TRANS_PUSH_SEND;
}
+void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (!crtc_state->vrr.enable)
+ return;
+
+ intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN);
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+}
+
void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
@@ -225,22 +242,18 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
trans_vrr_ctl(old_crtc_state));
intel_de_wait_for_clear(dev_priv, TRANS_VRR_STATUS(cpu_transcoder),
VRR_STATUS_VRR_EN_LIVE, 1000);
-
intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0);
- intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), 0);
}
-void intel_vrr_get_config(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 trans_vrr_ctl;
trans_vrr_ctl = intel_de_read(dev_priv, TRANS_VRR_CTL(cpu_transcoder));
+
crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE;
- if (!crtc_state->vrr.enable)
- return;
if (DISPLAY_VER(dev_priv) >= 13)
crtc_state->vrr.guardband =
@@ -249,10 +262,13 @@ void intel_vrr_get_config(struct intel_crtc *crtc,
if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE)
crtc_state->vrr.pipeline_full =
REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);
- if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN)
+
+ if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN) {
crtc_state->vrr.flipline = intel_de_read(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder)) + 1;
- crtc_state->vrr.vmax = intel_de_read(dev_priv, TRANS_VRR_VMAX(cpu_transcoder)) + 1;
- crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1;
+ crtc_state->vrr.vmax = intel_de_read(dev_priv, TRANS_VRR_VMAX(cpu_transcoder)) + 1;
+ crtc_state->vrr.vmin = intel_de_read(dev_priv, TRANS_VRR_VMIN(cpu_transcoder)) + 1;
+ }
- crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+ if (crtc_state->vrr.enable)
+ crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index 9fda1135b0dd..de16960c4929 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -11,22 +11,18 @@
struct drm_connector_state;
struct intel_atomic_state;
struct intel_connector;
-struct intel_crtc;
struct intel_crtc_state;
-struct intel_dp;
-struct intel_encoder;
bool intel_vrr_is_capable(struct intel_connector *connector);
void intel_vrr_check_modeset(struct intel_atomic_state *state);
void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
-void intel_vrr_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
+void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
+void intel_vrr_enable(const struct intel_crtc_state *crtc_state);
void intel_vrr_send_push(const struct intel_crtc_state *crtc_state);
bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state);
void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state);
-void intel_vrr_get_config(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state);
+void intel_vrr_get_config(struct intel_crtc_state *crtc_state);
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 0e7e014fcc71..1e7c97243fcf 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -348,6 +348,263 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
return 0;
}
+static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
+ int num_scalers_need, struct intel_crtc *intel_crtc,
+ const char *name, int idx,
+ struct intel_plane_state *plane_state,
+ int *scaler_id)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ int j;
+ u32 mode;
+
+ if (*scaler_id < 0) {
+ /* find a free scaler */
+ for (j = 0; j < intel_crtc->num_scalers; j++) {
+ if (scaler_state->scalers[j].in_use)
+ continue;
+
+ *scaler_id = j;
+ scaler_state->scalers[*scaler_id].in_use = 1;
+ break;
+ }
+ }
+
+ if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
+ "Cannot find scaler for %s:%d\n", name, idx))
+ return -EINVAL;
+
+ /* set scaler mode */
+ if (plane_state && plane_state->hw.fb &&
+ plane_state->hw.fb->format->is_yuv &&
+ plane_state->hw.fb->format->num_planes > 1) {
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+
+ if (DISPLAY_VER(dev_priv) == 9) {
+ mode = SKL_PS_SCALER_MODE_NV12;
+ } else if (icl_is_hdr_plane(dev_priv, plane->id)) {
+ /*
+ * On gen11+'s HDR planes we only use the scaler for
+ * scaling. They have a dedicated chroma upsampler, so
+ * we don't need the scaler to upsample the UV plane.
+ */
+ mode = PS_SCALER_MODE_NORMAL;
+ } else {
+ struct intel_plane *linked =
+ plane_state->planar_linked_plane;
+
+ mode = PS_SCALER_MODE_PLANAR;
+
+ if (linked)
+ mode |= PS_BINDING_Y_PLANE(linked->id);
+ }
+ } else if (DISPLAY_VER(dev_priv) >= 10) {
+ mode = PS_SCALER_MODE_NORMAL;
+ } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
+ /*
+		 * When only 1 scaler is in use on a pipe with 2 scalers,
+		 * scaler 0 operates in high quality (HQ) mode.
+		 * In this case use scaler 0 to take advantage of HQ mode.
+		 */
+ scaler_state->scalers[*scaler_id].in_use = 0;
+ *scaler_id = 0;
+ scaler_state->scalers[0].in_use = 1;
+ mode = SKL_PS_SCALER_MODE_HQ;
+ } else {
+ mode = SKL_PS_SCALER_MODE_DYN;
+ }
+
+ /*
+ * FIXME: we should also check the scaler factors for pfit, so
+ * this shouldn't be tied directly to planes.
+ */
+ if (plane_state && plane_state->hw.fb) {
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const struct drm_rect *src = &plane_state->uapi.src;
+ const struct drm_rect *dst = &plane_state->uapi.dst;
+ int hscale, vscale, max_vscale, max_hscale;
+
+ /*
+ * FIXME: When two scalers are needed, but only one of
+ * them needs to downscale, we should make sure that
+ * the one that needs downscaling support is assigned
+ * as the first scaler, so we don't reject downscaling
+ * unnecessarily.
+ */
+
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ /*
+			 * On display versions 14 and up, only the first
+			 * scaler supports a vertical scaling factor above
+			 * 1.0, while a horizontal scaling factor of up to
+			 * 3.0 is supported on all scalers.
+ */
+ max_hscale = 0x30000 - 1;
+ if (*scaler_id == 0)
+ max_vscale = 0x30000 - 1;
+ else
+ max_vscale = 0x10000;
+
+ } else if (DISPLAY_VER(dev_priv) >= 10 ||
+ !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
+ max_hscale = 0x30000 - 1;
+ max_vscale = 0x30000 - 1;
+ } else {
+ max_hscale = 0x20000 - 1;
+ max_vscale = 0x20000 - 1;
+ }
+
+ /*
+ * FIXME: We should change the if-else block above to
+ * support HQ vs dynamic scaler properly.
+ */
+
+ /* Check if required scaling is within limits */
+ hscale = drm_rect_calc_hscale(src, dst, 1, max_hscale);
+ vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale);
+
+ if (hscale < 0 || vscale < 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Scaler %d doesn't support required plane scaling\n",
+ *scaler_id);
+ drm_rect_debug_print("src: ", src, true);
+ drm_rect_debug_print("dst: ", dst, false);
+
+ return -EINVAL;
+ }
+ }
+
+ drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
+ intel_crtc->pipe, *scaler_id, name, idx);
+ scaler_state->scalers[*scaler_id].mode = mode;
+
+ return 0;
+}
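(The max_hscale/max_vscale limits above are 16.16 fixed-point scale
factors, the format drm_rect_calc_hscale()/drm_rect_calc_vscale() return:
0x10000 is exactly 1.0, so 0x30000 - 1 allows downscaling up to just under
3.0x. A small sketch of the encoding, for illustration only:

	int one_to_one = 0x10000;	/* ratio 1.0: no scaling */
	int max_3x = 0x30000 - 1;	/* just below ratio 3.0 */
	int int_part = max_3x >> 16;	/* == 2, i.e. 2.9999... */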
+
+/**
+ * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
+ * @dev_priv: i915 device
+ * @intel_crtc: intel crtc
+ * @crtc_state: incoming crtc_state to validate and setup scalers
+ *
+ * This function sets up scalers based on the staged scaling requests for
+ * a crtc and its planes. It is called from the crtc-level check path. If
+ * the requests are supportable, it attaches scalers to the requested
+ * planes and crtc.
+ *
+ * This function takes into account the current scaler(s) in use by any
+ * planes that are not part of this atomic state.
+ *
+ * Returns:
+ * 0 - scalers were set up successfully
+ * error code - otherwise
+ */
+int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_plane *plane = NULL;
+ struct intel_plane *intel_plane;
+ struct intel_plane_state *plane_state = NULL;
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct drm_atomic_state *drm_state = crtc_state->uapi.state;
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
+ int num_scalers_need;
+ int i;
+
+ num_scalers_need = hweight32(scaler_state->scaler_users);
+
+ /*
+ * High level flow:
+ * - staged scaler requests are already in scaler_state->scaler_users
+ * - check whether staged scaling requests can be supported
+ * - add planes using scalers that aren't in current transaction
+ * - assign scalers to requested users
+ * - as part of plane commit, scalers will be committed
+ * (i.e., either attached or detached) to respective planes in hw
+ * - as part of crtc_commit, scaler will be either attached or detached
+ * to crtc in hw
+ */
+
+ /* fail if required scalers > available scalers */
+ if (num_scalers_need > intel_crtc->num_scalers) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Too many scaling requests %d > %d\n",
+ num_scalers_need, intel_crtc->num_scalers);
+ return -EINVAL;
+ }
+
+	/* walk through scaler_users bits and start assigning scalers */
+ for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
+ int *scaler_id;
+ const char *name;
+ int idx, ret;
+
+ /* skip if scaler not required */
+ if (!(scaler_state->scaler_users & (1 << i)))
+ continue;
+
+ if (i == SKL_CRTC_INDEX) {
+ name = "CRTC";
+ idx = intel_crtc->base.base.id;
+
+ /* panel fitter case: assign as a crtc scaler */
+ scaler_id = &scaler_state->scaler_id;
+ } else {
+ name = "PLANE";
+
+ /* plane scaler case: assign as a plane scaler */
+ /* find the plane that set the bit as scaler_user */
+ plane = drm_state->planes[i].ptr;
+
+ /*
+			 * To enable/disable hq mode, add planes that are
+			 * using a scaler into this transaction.
+ */
+ if (!plane) {
+ struct drm_plane_state *state;
+
+ /*
+ * GLK+ scalers don't have a HQ mode so it
+ * isn't necessary to change between HQ and dyn mode
+ * on those platforms.
+ */
+ if (DISPLAY_VER(dev_priv) >= 10)
+ continue;
+
+ plane = drm_plane_from_index(&dev_priv->drm, i);
+ state = drm_atomic_get_plane_state(drm_state, plane);
+ if (IS_ERR(state)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to add [PLANE:%d] to drm_state\n",
+ plane->base.id);
+ return PTR_ERR(state);
+ }
+ }
+
+ intel_plane = to_intel_plane(plane);
+ idx = plane->base.id;
+
+ /* plane on different crtc cannot be a scaler user of this crtc */
+ if (drm_WARN_ON(&dev_priv->drm,
+ intel_plane->pipe != intel_crtc->pipe))
+ continue;
+
+ plane_state = intel_atomic_get_new_plane_state(intel_state,
+ intel_plane);
+ scaler_id = &plane_state->scaler_id;
+ }
+
+ ret = intel_atomic_setup_scaler(scaler_state, num_scalers_need,
+ intel_crtc, name, idx,
+ plane_state, scaler_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
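(Bookkeeping note: scaler_users packs one bit per user into a u32; bit i
is the plane with drm index i, and SKL_CRTC_INDEX (bit 31 in current i915)
marks the crtc's own panel-fitter use. A hypothetical helper, not part of
the patch, just to show the encoding:

	static bool crtc_needs_pfit_scaler(const struct intel_crtc_scaler_state *s)
	{
		return s->scaler_users & BIT(SKL_CRTC_INDEX);
	}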
+
static int glk_coef_tap(int i)
{
return i % 7;
@@ -401,7 +658,7 @@ static void glk_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
int i;
intel_de_write_fw(dev_priv, GLK_PS_COEF_INDEX_SET(pipe, id, set),
- PS_COEE_INDEX_AUTO_INC);
+ PS_COEF_INDEX_AUTO_INC);
for (i = 0; i < 17 * 7; i += 2) {
u32 tmp;
@@ -484,8 +741,8 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
id = scaler_state->scaler_id;
- ps_ctrl = skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
- ps_ctrl |= PS_SCALER_EN | scaler_state->scalers[id].mode;
+ ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | scaler_state->scalers[id].mode |
+ skl_scaler_get_filter_select(crtc_state->hw.scaling_filter, 0);
skl_scaler_setup_filter(dev_priv, pipe, id, 0,
crtc_state->hw.scaling_filter);
@@ -497,9 +754,9 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
- x << 16 | y);
+ PS_WIN_XPOS(x) | PS_WIN_YPOS(y));
intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
- width << 16 | height);
+ PS_WIN_XSIZE(width) | PS_WIN_YSIZE(height));
}
void
@@ -547,8 +804,8 @@ skl_program_plane_scaler(struct intel_plane *plane,
uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
}
- ps_ctrl = skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
- ps_ctrl |= PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode;
+ ps_ctrl = PS_SCALER_EN | PS_BINDING_PLANE(plane->id) | scaler->mode |
+ skl_scaler_get_filter_select(plane_state->hw.scaling_filter, 0);
skl_scaler_setup_filter(dev_priv, pipe, scaler_id, 0,
plane_state->hw.scaling_filter);
@@ -559,9 +816,9 @@ skl_program_plane_scaler(struct intel_plane *plane,
intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id),
PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id),
- (crtc_x << 16) | crtc_y);
+ PS_WIN_XPOS(crtc_x) | PS_WIN_YPOS(crtc_y));
intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id),
- (crtc_w << 16) | crtc_h);
+ PS_WIN_XSIZE(crtc_w) | PS_WIN_YSIZE(crtc_h));
}
static void skl_detach_scaler(struct intel_crtc *crtc, int id)
@@ -599,3 +856,42 @@ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state)
for (i = 0; i < crtc->num_scalers; i++)
skl_detach_scaler(crtc, i);
}
+
+void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
+ int id = -1;
+ int i;
+
+ /* find scaler attached to this pipe */
+ for (i = 0; i < crtc->num_scalers; i++) {
+ u32 ctl, pos, size;
+
+ ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
+ if ((ctl & (PS_SCALER_EN | PS_BINDING_MASK)) != (PS_SCALER_EN | PS_BINDING_PIPE))
+ continue;
+
+ id = i;
+ crtc_state->pch_pfit.enabled = true;
+
+ pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
+ size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
+
+ drm_rect_init(&crtc_state->pch_pfit.dst,
+ REG_FIELD_GET(PS_WIN_XPOS_MASK, pos),
+ REG_FIELD_GET(PS_WIN_YPOS_MASK, pos),
+ REG_FIELD_GET(PS_WIN_XSIZE_MASK, size),
+ REG_FIELD_GET(PS_WIN_YSIZE_MASK, size));
+
+ scaler_state->scalers[i].in_use = true;
+ break;
+ }
+
+ scaler_state->scaler_id = id;
+ if (id >= 0)
+ scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
+ else
+ scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
+}
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h
index 0097d5d08e10..63f93ca03c89 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.h
+++ b/drivers/gpu/drm/i915/display/skl_scaler.h
@@ -8,17 +8,22 @@
#include <linux/types.h>
enum drm_scaling_filter;
+enum pipe;
struct drm_i915_private;
+struct intel_crtc;
struct intel_crtc_state;
-struct intel_plane_state;
struct intel_plane;
-enum pipe;
+struct intel_plane_state;
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
+int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state);
+
void skl_pfit_enable(const struct intel_crtc_state *crtc_state);
void skl_program_plane_scaler(struct intel_plane *plane,
@@ -26,4 +31,7 @@ void skl_program_plane_scaler(struct intel_plane *plane,
const struct intel_plane_state *plane_state);
void skl_detach_scalers(const struct intel_crtc_state *crtc_state);
void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
+
+void skl_scaler_get_config(struct intel_crtc_state *crtc_state);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 61d008d4e5f1..cd90a30e04d8 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -280,6 +280,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
int ret;
drm_dbg_kms(&dev_priv->drm, "\n");
+ pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
ret = intel_panel_compute_config(intel_connector, adjusted_mode);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 1b25a6039152..c0f3ff4746ad 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "i915_irq.h"
+#include "i915_reg.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index 28f27091cd3b..ee2b44f896a2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -451,6 +451,33 @@ static ssize_t punit_req_freq_mhz_show(struct kobject *kobj,
return sysfs_emit(buff, "%u\n", preq);
}
+static ssize_t slpc_ignore_eff_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+
+ return sysfs_emit(buff, "%u\n", slpc->ignore_eff_freq);
+}
+
+static ssize_t slpc_ignore_eff_freq_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name);
+ struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+ int err;
+ u32 val;
+
+ err = kstrtou32(buff, 0, &val);
+ if (err)
+ return err;
+
+ err = intel_guc_slpc_set_ignore_eff_freq(slpc, val);
+ return err ?: count;
+}
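(Usage note for the new attribute; the sysfs path is illustrative and
depends on the card/gt indices of a given system:

	/* echo 1 > /sys/class/drm/card0/gt/gt0/slpc_ignore_eff_freq */

writing 1 makes SLPC honor min-frequency requests below the efficient
frequency instead of clamping to it, and the value is cached so it can be
re-applied when SLPC is re-enabled, as the intel_guc_slpc.c hunks below
show.)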
+
struct intel_gt_bool_throttle_attr {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
@@ -663,6 +690,8 @@ static struct kobj_attribute attr_media_freq_factor_scale =
INTEL_GT_ATTR_RO(media_RP0_freq_mhz);
INTEL_GT_ATTR_RO(media_RPn_freq_mhz);
+INTEL_GT_ATTR_RW(slpc_ignore_eff_freq);
+
static const struct attribute *media_perf_power_attrs[] = {
&attr_media_freq_factor.attr,
&attr_media_freq_factor_scale.attr,
@@ -744,6 +773,12 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
if (ret)
gt_warn(gt, "failed to create punit_req_freq_mhz sysfs (%pe)", ERR_PTR(ret));
+ if (intel_uc_uses_guc_slpc(&gt->uc)) {
+ ret = sysfs_create_file(kobj, &attr_slpc_ignore_eff_freq.attr);
+ if (ret)
+ gt_warn(gt, "failed to create ignore_eff_freq sysfs (%pe)", ERR_PTR(ret));
+ }
+
if (i915_mmio_reg_valid(intel_gt_perf_limit_reasons_reg(gt))) {
ret = sysfs_create_files(kobj, throttle_reason_attrs);
if (ret)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 797ea8340467..195ff72d7a14 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -7,7 +7,7 @@
#include <linux/stop_machine.h>
#include <linux/string_helpers.h>
-#include "display/intel_display.h"
+#include "display/intel_display_reset.h"
#include "display/intel_overlay.h"
#include "gem/i915_gem_context.h"
@@ -20,6 +20,7 @@
#include "i915_file_private.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
+#include "i915_reg.h"
#include "intel_breadcrumbs.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
@@ -1370,11 +1371,11 @@ static void intel_gt_reset_global(struct intel_gt *gt,
/* Use a watchdog to ensure that our reset completes */
intel_wedge_on_timeout(&w, gt, 60 * HZ) {
- intel_display_prepare_reset(gt->i915);
+ intel_display_reset_prepare(gt->i915);
intel_gt_reset(gt, engine_mask, reason);
- intel_display_finish_reset(gt->i915);
+ intel_display_reset_finish(gt->i915);
}
if (!test_bit(I915_WEDGED, &gt->reset.flags))
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index b2671ac59dc0..80968e49e2c3 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -10,6 +10,7 @@
#include "display/intel_display.h"
#include "i915_drv.h"
#include "i915_irq.h"
+#include "i915_reg.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index 84e77e8dbba1..fb30f733b036 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -8,6 +8,7 @@
#include "gem/i915_gem_internal.h"
+#include "i915_reg.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index d76508fa3af7..e89f16ecf1ae 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -16,6 +16,7 @@
#include "intel_guc_submission.h"
#include "i915_drv.h"
#include "i915_irq.h"
+#include "i915_reg.h"
/**
* DOC: GuC
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index cf49188db6a6..e0e793167d61 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -31,12 +31,14 @@
{ FORCEWAKE_MT, 0, 0, "FORCEWAKE" }
#define COMMON_GEN9BASE_GLOBAL \
- { GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \
- { GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }, \
{ ERROR_GEN6, 0, 0, "ERROR_GEN6" }, \
{ DONE_REG, 0, 0, "DONE_REG" }, \
{ HSW_GTT_CACHE_EN, 0, 0, "HSW_GTT_CACHE_EN" }
+#define GEN9_GLOBAL \
+ { GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \
+ { GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }
+
#define COMMON_GEN12BASE_GLOBAL \
{ GEN12_FAULT_TLB_DATA0, 0, 0, "GEN12_FAULT_TLB_DATA0" }, \
{ GEN12_FAULT_TLB_DATA1, 0, 0, "GEN12_FAULT_TLB_DATA1" }, \
@@ -142,6 +144,7 @@ static const struct __guc_mmio_reg_descr xe_lpd_gsc_inst_regs[] = {
static const struct __guc_mmio_reg_descr default_global_regs[] = {
COMMON_BASE_GLOBAL,
COMMON_GEN9BASE_GLOBAL,
+ GEN9_GLOBAL,
};
static const struct __guc_mmio_reg_descr default_rc_class_regs[] = {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 026d73855f36..56dbba1ef668 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -277,6 +277,7 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
slpc->max_freq_softlimit = 0;
slpc->min_freq_softlimit = 0;
+ slpc->ignore_eff_freq = false;
slpc->min_is_rpmax = false;
slpc->boost_freq = 0;
@@ -457,6 +458,29 @@ int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
return ret;
}
+int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret;
+
+ mutex_lock(&slpc->lock);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
+ val);
+ if (ret)
+ guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n",
+ val, ERR_PTR(ret));
+ else
+ slpc->ignore_eff_freq = val;
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&slpc->lock);
+ return ret;
+}
+
/**
* intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
* @slpc: pointer to intel_guc_slpc.
@@ -482,16 +506,6 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
mutex_lock(&slpc->lock);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- /* Ignore efficient freq if lower min freq is requested */
- ret = slpc_set_param(slpc,
- SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
- val < slpc->rp1_freq);
- if (ret) {
- guc_probe_error(slpc_to_guc(slpc), "Failed to toggle efficient freq: %pe\n",
- ERR_PTR(ret));
- goto out;
- }
-
ret = slpc_set_param(slpc,
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
val);
@@ -499,7 +513,6 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
if (!ret)
slpc->min_freq_softlimit = val;
-out:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&slpc->lock);
@@ -752,6 +765,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Set cached media freq ratio mode */
intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
+ /* Set cached value of ignore efficient freq */
+ intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
+
return 0;
}
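intel_guc_slpc_set_ignore_eff_freq() caches the requested value in the new
ignore_eff_freq field so that intel_guc_slpc_enable() can replay it after a
reset or resume. A plausible consumer is a sysfs toggle; the store callback
below is a hedged sketch (the attribute name and the kobj_to_slpc() helper
are assumptions, not part of this series):

    /* Hypothetical sysfs store: parse a bool and forward it to SLPC. */
    static ssize_t slpc_ignore_eff_freq_store(struct kobject *kobj,
                                              struct kobj_attribute *attr,
                                              const char *buf, size_t count)
    {
            struct intel_guc_slpc *slpc = kobj_to_slpc(kobj); /* assumed helper */
            bool val;
            int err;

            err = kstrtobool(buf, &val);
            if (err)
                    return err;

            err = intel_guc_slpc_set_ignore_eff_freq(slpc, val);
            return err ?: count;
    }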
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
index 17ed515f6a85..597eb5413ddf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -46,5 +46,6 @@ void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc);
int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode);
+int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
index a6ef53b04e04..a88651331497 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
@@ -31,6 +31,7 @@ struct intel_guc_slpc {
/* frequency softlimits */
u32 min_freq_softlimit;
u32 max_freq_softlimit;
+ bool ignore_eff_freq;
/* cached media ratio mode */
u32 media_ratio_mode;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 4b45a041ac5c..a9f7fa9b90bd 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1562,7 +1562,7 @@ static int pf_write(struct intel_vgpu *vgpu,
if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
- offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
+ offset == _PS_1C_CTRL) && (val & PS_BINDING_MASK) != PS_BINDING_PIPE) {
drm_WARN_ONCE(&i915->drm, true,
"VM(%d): guest is trying to scaling a plane\n",
vgpu->id);
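With the reworked scaler defines, binding to the pipe is the field value 0
(PS_BINDING_PIPE), so "a plane is selected" becomes "field != PS_BINDING_PIPE"
rather than "field != 0". A standalone check of the bit math, using simplified
userspace re-statements of REG_GENMASK()/REG_FIELD_PREP() (the kernel versions
add compile-time checking):

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK(h, l)        (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define FIELD_PREP(mask, v)  (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

    #define PS_BINDING_MASK      GENMASK(27, 25)
    #define PS_BINDING_PIPE      FIELD_PREP(PS_BINDING_MASK, 0)
    #define PS_BINDING_PLANE(n)  FIELD_PREP(PS_BINDING_MASK, (n) + 1)

    int main(void)
    {
            uint32_t val = PS_BINDING_PLANE(0);     /* scaler bound to plane 1 */

            /* The new-style test used by pf_write() above. */
            printf("plane scaling: %d\n",
                   (val & PS_BINDING_MASK) != PS_BINDING_PIPE);  /* prints 1 */
            return 0;
    }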
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 80c2bf98e341..41389a32e998 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -50,6 +50,7 @@
#include "i915_debugfs_params.h"
#include "i915_driver.h"
#include "i915_irq.h"
+#include "i915_reg.h"
#include "i915_scheduler.h"
#include "intel_mchbar_regs.h"
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 93fdc40d724f..fd198700272b 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -48,6 +48,7 @@
#include "display/intel_acpi.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
+#include "display/intel_display_driver.h"
#include "display/intel_display_types.h"
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
@@ -222,7 +223,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->display.wm.wm_mutex);
mutex_init(&dev_priv->display.pps.mutex);
mutex_init(&dev_priv->display.hdcp.comp_mutex);
- spin_lock_init(&dev_priv->display.dkl.phy_lock);
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(&dev_priv->runtime_pm);
@@ -251,7 +251,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_detect_pch(dev_priv);
intel_irq_init(dev_priv);
- intel_init_display_hooks(dev_priv);
+ intel_display_driver_early_probe(dev_priv);
intel_clock_gating_hooks_init(dev_priv);
intel_detect_preproduction_hw(dev_priv);
@@ -720,8 +720,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct intel_device_info *match_info =
(struct intel_device_info *)ent->driver_data;
- struct intel_device_info *device_info;
- struct intel_runtime_info *runtime;
struct drm_i915_private *i915;
i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
@@ -734,14 +732,8 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Device parameters start as a copy of module parameters. */
i915_params_copy(&i915->params, &i915_modparams);
- /* Setup the write-once "constant" device info */
- device_info = mkwrite_device_info(i915);
- memcpy(device_info, match_info, sizeof(*device_info));
-
- /* Initialize initial runtime info from static const data and pdev. */
- runtime = RUNTIME_INFO(i915);
- memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime));
- runtime->device_id = pdev->device;
+ /* Set up device info and initial runtime info. */
+ intel_device_info_driver_create(i915, pdev->device, match_info);
return i915;
}
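i915_driver_create() no longer open-codes the device info setup; it is folded
into intel_device_info_driver_create(), whose body is not part of this diff.
Reconstructed from the removed lines, it plausibly looks like this (a sketch,
not the actual implementation):

    void intel_device_info_driver_create(struct drm_i915_private *i915,
                                         u16 device_id,
                                         const struct intel_device_info *match_info)
    {
            struct intel_device_info *info;
            struct intel_runtime_info *runtime;

            /* Setup the write-once "constant" device info. */
            info = (struct intel_device_info *)INTEL_INFO(i915);
            memcpy(info, match_info, sizeof(*info));

            /* Initialize initial runtime info from static const data and pdev. */
            runtime = RUNTIME_INFO(i915);
            memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime));
            runtime->device_id = device_id;
    }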
@@ -752,7 +744,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
* @ent: matching PCI ID entry
*
* The driver probe routine has to do several things:
- * - drive output discovery via intel_modeset_init()
+ * - drive output discovery via intel_display_driver_probe()
* - initialize the memory manager
* - allocate initial config memory
* - setup the DRM framebuffer with the allocated memory
@@ -790,7 +782,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_cleanup_mmio;
- ret = intel_modeset_init_noirq(i915);
+ ret = intel_display_driver_probe_noirq(i915);
if (ret < 0)
goto out_cleanup_hw;
@@ -798,7 +790,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_modeset;
- ret = intel_modeset_init_nogem(i915);
+ ret = intel_display_driver_probe_nogem(i915);
if (ret)
goto out_cleanup_irq;
@@ -808,7 +800,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
intel_pxp_init(i915);
- ret = intel_modeset_init(i915);
+ ret = intel_display_driver_probe(i915);
if (ret)
goto out_cleanup_gem;
@@ -828,14 +820,14 @@ out_cleanup_gem:
i915_gem_driver_release(i915);
out_cleanup_modeset2:
/* FIXME clean up the error path */
- intel_modeset_driver_remove(i915);
+ intel_display_driver_remove(i915);
intel_irq_uninstall(i915);
- intel_modeset_driver_remove_noirq(i915);
+ intel_display_driver_remove_noirq(i915);
goto out_cleanup_modeset;
out_cleanup_irq:
intel_irq_uninstall(i915);
out_cleanup_modeset:
- intel_modeset_driver_remove_nogem(i915);
+ intel_display_driver_remove_nogem(i915);
out_cleanup_hw:
i915_driver_hw_remove(i915);
intel_memory_regions_driver_release(i915);
@@ -871,16 +863,16 @@ void i915_driver_remove(struct drm_i915_private *i915)
intel_gvt_driver_remove(i915);
- intel_modeset_driver_remove(i915);
+ intel_display_driver_remove(i915);
intel_irq_uninstall(i915);
- intel_modeset_driver_remove_noirq(i915);
+ intel_display_driver_remove_noirq(i915);
i915_reset_error_state(i915);
i915_gem_driver_remove(i915);
- intel_modeset_driver_remove_nogem(i915);
+ intel_display_driver_remove_nogem(i915);
i915_driver_hw_remove(i915);
@@ -1052,7 +1044,7 @@ static int i915_drm_prepare(struct drm_device *dev)
intel_pxp_suspend_prepare(i915->pxp);
/*
- * NB intel_display_suspend() may issue new requests after we've
+ * NB intel_display_driver_suspend() may issue new requests after we've
* ostensibly marked the GPU as ready-to-sleep here. We need to
* split out that work and pull it forward so that after point,
* the GPU is not woken again.
@@ -1076,7 +1068,7 @@ static int i915_drm_suspend(struct drm_device *dev)
pci_save_state(pdev);
- intel_display_suspend(dev);
+ intel_display_driver_suspend(dev_priv);
intel_dp_mst_suspend(dev_priv);
@@ -1233,7 +1225,7 @@ static int i915_drm_resume(struct drm_device *dev)
*
* drm_mode_config_reset() needs AUX interrupts.
*
- * Modeset enabling in intel_modeset_init_hw() also needs working
+ * Modeset enabling in intel_display_driver_init_hw() also needs working
* interrupts.
*/
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -1243,13 +1235,14 @@ static int i915_drm_resume(struct drm_device *dev)
i915_gem_resume(dev_priv);
- intel_modeset_init_hw(dev_priv);
+ intel_display_driver_init_hw(dev_priv);
+
intel_clock_gating_init(dev_priv);
intel_hpd_init(dev_priv);
/* MST sideband requires HPD interrupts enabled */
intel_dp_mst_resume(dev_priv);
- intel_display_resume(dev);
+ intel_display_driver_resume(dev_priv);
intel_hpd_poll_disable(dev_priv);
if (HAS_DISPLAY(dev_priv))
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e771fdc3099c..fe7eeafe9cff 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -931,11 +931,4 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_LMEMBAR_SMEM_STOLEN(i915) (!HAS_LMEM(i915) && \
GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
-/* intel_device_info.c */
-static inline struct intel_device_info *
-mkwrite_device_info(struct drm_i915_private *dev_priv)
-{
- return (struct intel_device_info *)INTEL_INFO(dev_priv);
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d24bdea65a3d..e0a4fddccac5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -54,6 +54,7 @@
#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
+#include "i915_reg.h"
/**
* DOC: interrupt handling
@@ -83,6 +84,7 @@ static inline void pmu_irq_stats(struct drm_i915_private *i915,
typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct intel_encoder *encoder);
+typedef u32 (*hotplug_mask_func)(enum hpd_pin pin);
static const u32 hpd_ilk[HPD_NUM_PINS] = {
[HPD_PORT_A] = DE_DP_A_HOTPLUG,
@@ -162,6 +164,13 @@ static const u32 hpd_gen11[HPD_NUM_PINS] = {
[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};
+static const u32 hpd_xelpdp[HPD_NUM_PINS] = {
+ [HPD_PORT_TC1] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC1) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC1),
+ [HPD_PORT_TC2] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC2) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC2),
+ [HPD_PORT_TC3] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC3) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC3),
+ [HPD_PORT_TC4] = XELPDP_TBT_HOTPLUG(HPD_PORT_TC4) | XELPDP_DP_ALT_HOTPLUG(HPD_PORT_TC4),
+};
+
static const u32 hpd_icp[HPD_NUM_PINS] = {
[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
@@ -182,6 +191,15 @@ static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
[HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1),
};
+static const u32 hpd_mtp[HPD_NUM_PINS] = {
+ [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
+ [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
+ [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
+ [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
+ [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
+ [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
+};
+
static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
struct intel_hotplug *hpd = &dev_priv->display.hotplug;
@@ -195,7 +213,9 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
return;
}
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(dev_priv) >= 14)
+ hpd->hpd = hpd_xelpdp;
+ else if (DISPLAY_VER(dev_priv) >= 11)
hpd->hpd = hpd_gen11;
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
hpd->hpd = hpd_bxt;
@@ -214,6 +234,8 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
hpd->pch_hpd = hpd_sde_dg1;
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MTP)
+ hpd->pch_hpd = hpd_mtp;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
hpd->pch_hpd = hpd_icp;
else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
@@ -879,6 +901,18 @@ static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
return hotplug_irqs;
}
+static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915,
+ hotplug_mask_func hotplug_mask)
+{
+ enum hpd_pin pin;
+ u32 hotplug = 0;
+
+ for_each_hpd_pin(pin)
+ hotplug |= hotplug_mask(pin);
+
+ return hotplug;
+}
+
static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
hotplug_enables_func hotplug_enables)
{
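intel_hpd_hotplug_mask() ORs a per-pin callback over every pin, so each
platform describes one pin at a time and the whole-register mask falls out of
the loop. For IBX (see ibx_hotplug_mask() below) the aggregate is equivalent
to the hand-written constant it replaces; a fragment, assuming the pin set in
this file:

    /* Equivalent expansion of the new helper for IBX: */
    u32 mask = intel_hpd_hotplug_mask(dev_priv, ibx_hotplug_mask);
    /*
     * mask == PORTA_HOTPLUG_ENABLE |
     *         PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_MASK |
     *         PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_MASK |
     *         PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_MASK
     */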
@@ -1559,6 +1593,44 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
cpt_serr_int_handler(dev_priv);
}
+static void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir)
+{
+ enum hpd_pin pin;
+ u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK);
+ u32 trigger_aux = iir & XELPDP_AUX_TC_MASK;
+ u32 pin_mask = 0, long_mask = 0;
+
+ for (pin = HPD_PORT_TC1; pin <= HPD_PORT_TC4; pin++) {
+ u32 val;
+
+ if (!(i915->display.hotplug.hpd[pin] & hotplug_trigger))
+ continue;
+
+ pin_mask |= BIT(pin);
+
+ val = intel_de_read(i915, XELPDP_PORT_HOTPLUG_CTL(pin));
+ intel_de_write(i915, XELPDP_PORT_HOTPLUG_CTL(pin), val);
+
+ if (val & (XELPDP_DP_ALT_HPD_LONG_DETECT | XELPDP_TBT_HPD_LONG_DETECT))
+ long_mask |= BIT(pin);
+ }
+
+ if (pin_mask) {
+ drm_dbg(&i915->drm,
+ "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, pin_mask, long_mask);
+
+ intel_hpd_irq_handler(i915, pin_mask, long_mask);
+ }
+
+ if (trigger_aux)
+ dp_aux_irq_handler(i915);
+
+ if (!pin_mask && !trigger_aux)
+ drm_err(&i915->drm,
+ "Unexpected DE HPD/AUX interrupt 0x%08x\n", iir);
+}
+
static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
@@ -1866,7 +1938,10 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
{
u32 mask;
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(dev_priv) >= 14)
+ return TGL_DE_PORT_AUX_DDIA |
+ TGL_DE_PORT_AUX_DDIB;
+ else if (DISPLAY_VER(dev_priv) >= 13)
return TGL_DE_PORT_AUX_DDIA |
TGL_DE_PORT_AUX_DDIB |
TGL_DE_PORT_AUX_DDIC |
@@ -2029,6 +2104,34 @@ u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
return mask;
}
+static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
+{
+ u32 pica_ier = 0;
+
+ *pica_iir = 0;
+ *pch_iir = intel_de_read(i915, SDEIIR);
+ if (!*pch_iir)
+ return;
+
+ /*
+ * PICA IER must be disabled/re-enabled around clearing PICA IIR and
+ * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set
+ * their flags both in the PICA and SDE IIR.
+ */
+ if (*pch_iir & SDE_PICAINTERRUPT) {
+ drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTP);
+
+ pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0);
+ *pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR);
+ intel_de_write(i915, PICAINTERRUPT_IIR, *pica_iir);
+ }
+
+ intel_de_write(i915, SDEIIR, *pch_iir);
+
+ if (pica_ier)
+ intel_de_write(i915, PICAINTERRUPT_IER, pica_ier);
+}
+
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
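The ordering in gen8_read_and_ack_pch_irqs() is load-bearing; a distilled view
of the sequence and the race it closes (annotation only, mirroring the code
above):

    /*
     * 1. read SDEIIR; bail if nothing is pending
     * 2. if SDE_PICAINTERRUPT: PICAINTERRUPT_IER <- 0  (gate new PICA IRQs)
     * 3.                       read + ack PICAINTERRUPT_IIR
     * 4. ack SDEIIR                                    (PICA still gated)
     * 5. restore PICAINTERRUPT_IER
     *
     * A PICA event landing between steps 3 and 4 is held back by the gated
     * IER; once IER is restored it latches into both the PICA and SDE IIRs,
     * so a later SDEIIR read sees SDE_PICAINTERRUPT again and no interrupt
     * is lost.
     */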
@@ -2153,16 +2256,20 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
master_ctl & GEN8_DE_PCH_IRQ) {
+ u32 pica_iir;
+
/*
* FIXME(BDW): Assume for now that the new interrupt handling
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
- iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
+ gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir);
if (iir) {
- intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
ret = IRQ_HANDLED;
+ if (pica_iir)
+ xelpdp_pica_irq_handler(dev_priv, pica_iir);
+
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_irq_handler(dev_priv, iir);
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
@@ -2740,7 +2847,11 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
- GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
+
+ if (DISPLAY_VER(dev_priv) >= 14)
+ GEN3_IRQ_RESET(uncore, PICAINTERRUPT_);
+ else
+ GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
GEN3_IRQ_RESET(uncore, SDE);
@@ -2837,6 +2948,22 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
+static u32 ibx_hotplug_mask(enum hpd_pin hpd_pin)
+{
+ switch (hpd_pin) {
+ case HPD_PORT_A:
+ return PORTA_HOTPLUG_ENABLE;
+ case HPD_PORT_B:
+ return PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_MASK;
+ case HPD_PORT_C:
+ return PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_MASK;
+ case HPD_PORT_D:
+ return PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_MASK;
+ default:
+ return 0;
+ }
+}
+
static u32 ibx_hotplug_enables(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
@@ -2871,16 +2998,19 @@ static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
* The pulse duration bits are reserved on LPT+.
*/
intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- PORTA_HOTPLUG_ENABLE |
- PORTB_HOTPLUG_ENABLE |
- PORTC_HOTPLUG_ENABLE |
- PORTD_HOTPLUG_ENABLE |
- PORTB_PULSE_DURATION_MASK |
- PORTC_PULSE_DURATION_MASK |
- PORTD_PULSE_DURATION_MASK,
+ intel_hpd_hotplug_mask(dev_priv, ibx_hotplug_mask),
intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables));
}
+static void ibx_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
+ ibx_hotplug_mask(encoder->hpd_pin),
+ ibx_hotplug_enables(encoder));
+}
+
static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
@@ -2893,56 +3023,82 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_detection_setup(dev_priv);
}
-static u32 icp_ddi_hotplug_enables(struct intel_encoder *encoder)
+static u32 icp_ddi_hotplug_mask(enum hpd_pin hpd_pin)
{
- switch (encoder->hpd_pin) {
+ switch (hpd_pin) {
case HPD_PORT_A:
case HPD_PORT_B:
case HPD_PORT_C:
case HPD_PORT_D:
- return SHOTPLUG_CTL_DDI_HPD_ENABLE(encoder->hpd_pin);
+ return SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin);
default:
return 0;
}
}
-static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder)
+static u32 icp_ddi_hotplug_enables(struct intel_encoder *encoder)
{
- switch (encoder->hpd_pin) {
+ return icp_ddi_hotplug_mask(encoder->hpd_pin);
+}
+
+static u32 icp_tc_hotplug_mask(enum hpd_pin hpd_pin)
+{
+ switch (hpd_pin) {
case HPD_PORT_TC1:
case HPD_PORT_TC2:
case HPD_PORT_TC3:
case HPD_PORT_TC4:
case HPD_PORT_TC5:
case HPD_PORT_TC6:
- return ICP_TC_HPD_ENABLE(encoder->hpd_pin);
+ return ICP_TC_HPD_ENABLE(hpd_pin);
default:
return 0;
}
}
+static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder)
+{
+ return icp_tc_hotplug_mask(encoder->hpd_pin);
+}
+
static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI,
- SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
- SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
- SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
- SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D),
+ intel_hpd_hotplug_mask(dev_priv, icp_ddi_hotplug_mask),
intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables));
}
+static void icp_ddi_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_DDI,
+ icp_ddi_hotplug_mask(encoder->hpd_pin),
+ icp_ddi_hotplug_enables(encoder));
+}
+
static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC,
- ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
- ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
- ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
- ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
- ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
- ICP_TC_HPD_ENABLE(HPD_PORT_TC6),
+ intel_hpd_hotplug_mask(dev_priv, icp_tc_hotplug_mask),
intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables));
}
+static void icp_tc_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_TC,
+ icp_tc_hotplug_mask(encoder->hpd_pin),
+ icp_tc_hotplug_enables(encoder));
+}
+
+static void icp_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ icp_ddi_hpd_enable_detection(encoder);
+ icp_tc_hpd_enable_detection(encoder);
+}
+
static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
@@ -2959,21 +3115,26 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
icp_tc_hpd_detection_setup(dev_priv);
}
-static u32 gen11_hotplug_enables(struct intel_encoder *encoder)
+static u32 gen11_hotplug_mask(enum hpd_pin hpd_pin)
{
- switch (encoder->hpd_pin) {
+ switch (hpd_pin) {
case HPD_PORT_TC1:
case HPD_PORT_TC2:
case HPD_PORT_TC3:
case HPD_PORT_TC4:
case HPD_PORT_TC5:
case HPD_PORT_TC6:
- return GEN11_HOTPLUG_CTL_ENABLE(encoder->hpd_pin);
+ return GEN11_HOTPLUG_CTL_ENABLE(hpd_pin);
default:
return 0;
}
}
+static u32 gen11_hotplug_enables(struct intel_encoder *encoder)
+{
+ return gen11_hotplug_mask(encoder->hpd_pin);
+}
+
static void dg1_hpd_invert(struct drm_i915_private *i915)
{
u32 val = (INVERT_DDIA_HPD |
@@ -2983,6 +3144,14 @@ static void dg1_hpd_invert(struct drm_i915_private *i915)
intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val);
}
+static void dg1_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ dg1_hpd_invert(i915);
+ icp_hpd_enable_detection(encoder);
+}
+
static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
dg1_hpd_invert(dev_priv);
@@ -2992,27 +3161,46 @@ static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL,
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
+ intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask),
intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
}
+static void gen11_tc_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ intel_uncore_rmw(&i915->uncore, GEN11_TC_HOTPLUG_CTL,
+ gen11_hotplug_mask(encoder->hpd_pin),
+ gen11_hotplug_enables(encoder));
+}
+
static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL,
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
- GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6),
+ intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask),
intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables));
}
+static void gen11_tbt_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ intel_uncore_rmw(&i915->uncore, GEN11_TBT_HOTPLUG_CTL,
+ gen11_hotplug_mask(encoder->hpd_pin),
+ gen11_hotplug_enables(encoder));
+}
+
+static void gen11_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ gen11_tc_hpd_enable_detection(encoder);
+ gen11_tbt_hpd_enable_detection(encoder);
+
+ if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ icp_hpd_enable_detection(encoder);
+}
+
static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
@@ -3031,9 +3219,177 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
icp_hpd_irq_setup(dev_priv);
}
-static u32 spt_hotplug_enables(struct intel_encoder *encoder)
+static u32 mtp_ddi_hotplug_mask(enum hpd_pin hpd_pin)
{
- switch (encoder->hpd_pin) {
+ switch (hpd_pin) {
+ case HPD_PORT_A:
+ case HPD_PORT_B:
+ return SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin);
+ default:
+ return 0;
+ }
+}
+
+static u32 mtp_ddi_hotplug_enables(struct intel_encoder *encoder)
+{
+ return mtp_ddi_hotplug_mask(encoder->hpd_pin);
+}
+
+static u32 mtp_tc_hotplug_mask(enum hpd_pin hpd_pin)
+{
+ switch (hpd_pin) {
+ case HPD_PORT_TC1:
+ case HPD_PORT_TC2:
+ case HPD_PORT_TC3:
+ case HPD_PORT_TC4:
+ return ICP_TC_HPD_ENABLE(hpd_pin);
+ default:
+ return 0;
+ }
+}
+
+static u32 mtp_tc_hotplug_enables(struct intel_encoder *encoder)
+{
+ return mtp_tc_hotplug_mask(encoder->hpd_pin);
+}
+
+static void mtp_ddi_hpd_detection_setup(struct drm_i915_private *i915)
+{
+ intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
+ intel_hpd_hotplug_mask(i915, mtp_ddi_hotplug_mask),
+ intel_hpd_hotplug_enables(i915, mtp_ddi_hotplug_enables));
+}
+
+static void mtp_ddi_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ intel_de_rmw(i915, SHOTPLUG_CTL_DDI,
+ mtp_ddi_hotplug_mask(encoder->hpd_pin),
+ mtp_ddi_hotplug_enables(encoder));
+}
+
+static void mtp_tc_hpd_detection_setup(struct drm_i915_private *i915)
+{
+ intel_de_rmw(i915, SHOTPLUG_CTL_TC,
+ intel_hpd_hotplug_mask(i915, mtp_tc_hotplug_mask),
+ intel_hpd_hotplug_enables(i915, mtp_tc_hotplug_enables));
+}
+
+static void mtp_tc_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ intel_de_rmw(i915, SHOTPLUG_CTL_TC,
+ mtp_tc_hotplug_mask(encoder->hpd_pin),
+ mtp_tc_hotplug_enables(encoder));
+}
+
+static void mtp_hpd_invert(struct drm_i915_private *i915)
+{
+ u32 val = (INVERT_DDIA_HPD |
+ INVERT_DDIB_HPD |
+ INVERT_DDIC_HPD |
+ INVERT_TC1_HPD |
+ INVERT_TC2_HPD |
+ INVERT_TC3_HPD |
+ INVERT_TC4_HPD |
+ INVERT_DDID_HPD_MTP |
+ INVERT_DDIE_HPD);
+ intel_de_rmw(i915, SOUTH_CHICKEN1, 0, val);
+}
+
+static void mtp_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ mtp_hpd_invert(i915);
+ mtp_ddi_hpd_enable_detection(encoder);
+ mtp_tc_hpd_enable_detection(encoder);
+}
+
+static void mtp_hpd_irq_setup(struct drm_i915_private *i915)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd);
+
+ intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
+
+ mtp_hpd_invert(i915);
+ ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
+
+ mtp_ddi_hpd_detection_setup(i915);
+ mtp_tc_hpd_detection_setup(i915);
+}
+
+static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin)
+{
+ return hpd_pin >= HPD_PORT_TC1 && hpd_pin <= HPD_PORT_TC4;
+}
+
+static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915,
+ enum hpd_pin hpd_pin, bool enable)
+{
+ u32 mask = XELPDP_TBT_HOTPLUG_ENABLE |
+ XELPDP_DP_ALT_HOTPLUG_ENABLE;
+
+ if (!is_xelpdp_pica_hpd_pin(hpd_pin))
+ return;
+
+ intel_de_rmw(i915, XELPDP_PORT_HOTPLUG_CTL(hpd_pin),
+ mask, enable ? mask : 0);
+}
+
+static void xelpdp_pica_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ _xelpdp_pica_hpd_detection_setup(i915, encoder->hpd_pin, true);
+}
+
+static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915)
+{
+ struct intel_encoder *encoder;
+ u32 available_pins = 0;
+ enum hpd_pin pin;
+
+ BUILD_BUG_ON(BITS_PER_TYPE(available_pins) < HPD_NUM_PINS);
+
+ for_each_intel_encoder(&i915->drm, encoder)
+ available_pins |= BIT(encoder->hpd_pin);
+
+ for_each_hpd_pin(pin)
+ _xelpdp_pica_hpd_detection_setup(i915, pin, available_pins & BIT(pin));
+}
+
+static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ xelpdp_pica_hpd_enable_detection(encoder);
+ mtp_hpd_enable_detection(encoder);
+}
+
+static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.hpd);
+ hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.hpd);
+
+ intel_de_rmw(i915, PICAINTERRUPT_IMR, hotplug_irqs,
+ ~enabled_irqs & hotplug_irqs);
+ intel_uncore_posting_read(&i915->uncore, PICAINTERRUPT_IMR);
+
+ xelpdp_pica_hpd_detection_setup(i915);
+
+ if (INTEL_PCH_TYPE(i915) >= PCH_MTP)
+ mtp_hpd_irq_setup(i915);
+}
+
+static u32 spt_hotplug_mask(enum hpd_pin hpd_pin)
+{
+ switch (hpd_pin) {
case HPD_PORT_A:
return PORTA_HOTPLUG_ENABLE;
case HPD_PORT_B:
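In xelpdp_hpd_irq_setup() above, the IMR write computes ~enabled_irqs &
hotplug_irqs: exactly the hotplug bits with no enabled IRQ stay masked. A toy
evaluation with stand-in bit values (the real bits live in the
XELPDP_*_HOTPLUG_MASK fields):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t hotplug_irqs = 0x0000000f;  /* stand-in: TC1..TC4 bits */
            uint32_t enabled_irqs = 0x00000003;  /* only TC1/TC2 have encoders */

            /* Third argument of the intel_de_rmw(..., PICAINTERRUPT_IMR, ...) call. */
            uint32_t imr_bits = ~enabled_irqs & hotplug_irqs;

            assert(imr_bits == 0x0000000c);      /* TC3/TC4 remain masked */
            printf("masked hotplug bits: 0x%08x\n", imr_bits);
            return 0;
    }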
@@ -3047,9 +3403,14 @@ static u32 spt_hotplug_enables(struct intel_encoder *encoder)
}
}
-static u32 spt_hotplug2_enables(struct intel_encoder *encoder)
+static u32 spt_hotplug_enables(struct intel_encoder *encoder)
{
- switch (encoder->hpd_pin) {
+ return spt_hotplug_mask(encoder->hpd_pin);
+}
+
+static u32 spt_hotplug2_mask(enum hpd_pin hpd_pin)
+{
+ switch (hpd_pin) {
case HPD_PORT_E:
return PORTE_HOTPLUG_ENABLE;
default:
@@ -3057,6 +3418,11 @@ static u32 spt_hotplug2_enables(struct intel_encoder *encoder)
}
}
+static u32 spt_hotplug2_enables(struct intel_encoder *encoder)
+{
+ return spt_hotplug2_mask(encoder->hpd_pin);
+}
+
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
/* Display WA #1179 WaHardHangonHotPlug: cnp */
@@ -3067,16 +3433,34 @@ static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
/* Enable digital hotplug on the PCH */
intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- PORTA_HOTPLUG_ENABLE |
- PORTB_HOTPLUG_ENABLE |
- PORTC_HOTPLUG_ENABLE |
- PORTD_HOTPLUG_ENABLE,
+ intel_hpd_hotplug_mask(dev_priv, spt_hotplug_mask),
intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables));
- intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, PORTE_HOTPLUG_ENABLE,
+ intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2,
+ intel_hpd_hotplug_mask(dev_priv, spt_hotplug2_mask),
intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables));
}
+static void spt_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ /* Display WA #1179 WaHardHangonHotPlug: cnp */
+ if (HAS_PCH_CNP(i915)) {
+ intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1,
+ CHASSIS_CLK_REQ_DURATION_MASK,
+ CHASSIS_CLK_REQ_DURATION(0xf));
+ }
+
+ intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
+ spt_hotplug_mask(encoder->hpd_pin),
+ spt_hotplug_enables(encoder));
+
+ intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG2,
+ spt_hotplug2_mask(encoder->hpd_pin),
+ spt_hotplug2_enables(encoder));
+}
+
static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
@@ -3092,6 +3476,17 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
spt_hpd_detection_setup(dev_priv);
}
+static u32 ilk_hotplug_mask(enum hpd_pin hpd_pin)
+{
+ switch (hpd_pin) {
+ case HPD_PORT_A:
+ return DIGITAL_PORTA_HOTPLUG_ENABLE |
+ DIGITAL_PORTA_PULSE_DURATION_MASK;
+ default:
+ return 0;
+ }
+}
+
static u32 ilk_hotplug_enables(struct intel_encoder *encoder)
{
switch (encoder->hpd_pin) {
@@ -3111,10 +3506,21 @@ static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
* The pulse duration bits are reserved on HSW+.
*/
intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
- DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_MASK,
+ intel_hpd_hotplug_mask(dev_priv, ilk_hotplug_mask),
intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));
}
+static void ilk_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ intel_uncore_rmw(&i915->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
+ ilk_hotplug_mask(encoder->hpd_pin),
+ ilk_hotplug_enables(encoder));
+
+ ibx_hpd_enable_detection(encoder);
+}
+
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
@@ -3132,6 +3538,20 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_irq_setup(dev_priv);
}
+static u32 bxt_hotplug_mask(enum hpd_pin hpd_pin)
+{
+ switch (hpd_pin) {
+ case HPD_PORT_A:
+ return PORTA_HOTPLUG_ENABLE | BXT_DDIA_HPD_INVERT;
+ case HPD_PORT_B:
+ return PORTB_HOTPLUG_ENABLE | BXT_DDIB_HPD_INVERT;
+ case HPD_PORT_C:
+ return PORTC_HOTPLUG_ENABLE | BXT_DDIC_HPD_INVERT;
+ default:
+ return 0;
+ }
+}
+
static u32 bxt_hotplug_enables(struct intel_encoder *encoder)
{
u32 hotplug;
@@ -3160,13 +3580,19 @@ static u32 bxt_hotplug_enables(struct intel_encoder *encoder)
static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG,
- PORTA_HOTPLUG_ENABLE |
- PORTB_HOTPLUG_ENABLE |
- PORTC_HOTPLUG_ENABLE |
- BXT_DDI_HPD_INVERT_MASK,
+ intel_hpd_hotplug_mask(dev_priv, bxt_hotplug_mask),
intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables));
}
+static void bxt_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG,
+ bxt_hotplug_mask(encoder->hpd_pin),
+ bxt_hotplug_enables(encoder));
+}
+
static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, enabled_irqs;
@@ -3363,7 +3789,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
u32 de_hpd_masked = 0;
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
GEN11_DE_TBT_HOTPLUG_MASK;
@@ -3373,6 +3799,20 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
}
+static void mtp_irq_postinstall(struct drm_i915_private *i915)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
+ u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
+ u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
+ XELPDP_TBT_HOTPLUG_MASK;
+
+ GEN3_IRQ_INIT(uncore, PICAINTERRUPT_, ~de_hpd_mask,
+ de_hpd_enables);
+
+ GEN3_IRQ_INIT(uncore, SDE, ~sde_mask, 0xffffffff);
+}
+
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3434,7 +3874,11 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
if (HAS_DISPLAY(dev_priv)) {
- icp_irq_postinstall(dev_priv);
+ if (DISPLAY_VER(dev_priv) >= 14)
+ mtp_irq_postinstall(dev_priv);
+ else
+ icp_irq_postinstall(dev_priv);
+
gen8_de_irq_postinstall(dev_priv);
intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
GEN11_DISPLAY_IRQ_ENABLE);
@@ -3826,6 +4270,15 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
i915_enable_asle_pipestat(dev_priv);
}
+static void i915_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin];
+
+ /* HPD sense and interrupt enable are one and the same */
+ i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en);
+}
+
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_en;
@@ -3911,15 +4364,20 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
}
struct intel_hotplug_funcs {
+ /* Enable HPD sense and interrupts for all present encoders */
void (*hpd_irq_setup)(struct drm_i915_private *i915);
+ /* Enable HPD sense for a single encoder */
+ void (*hpd_enable_detection)(struct intel_encoder *encoder);
};
#define HPD_FUNCS(platform) \
static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
.hpd_irq_setup = platform##_hpd_irq_setup, \
+ .hpd_enable_detection = platform##_hpd_enable_detection, \
}
HPD_FUNCS(i915);
+HPD_FUNCS(xelpdp);
HPD_FUNCS(dg1);
HPD_FUNCS(gen11);
HPD_FUNCS(bxt);
@@ -3928,6 +4386,14 @@ HPD_FUNCS(spt);
HPD_FUNCS(ilk);
#undef HPD_FUNCS
+void intel_hpd_enable_detection(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ if (i915->display.funcs.hotplug)
+ i915->display.funcs.hotplug->hpd_enable_detection(encoder);
+}
+
void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
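For reference, the HPD_FUNCS(xelpdp) invocation above expands to a vtable
tying the two new entry points together:

    static const struct intel_hotplug_funcs xelpdp_hpd_funcs = {
            .hpd_irq_setup        = xelpdp_hpd_irq_setup,
            .hpd_enable_detection = xelpdp_hpd_enable_detection,
    };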
@@ -3980,6 +4446,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
else if (HAS_PCH_DG1(dev_priv))
dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
+ else if (DISPLAY_VER(dev_priv) >= 14)
+ dev_priv->display.funcs.hotplug = &xelpdp_hpd_funcs;
else if (DISPLAY_VER(dev_priv) >= 11)
dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
@@ -4135,7 +4603,7 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
/*
* FIXME we can get called twice during driver probe
* error handling as well as during driver remove due to
- * intel_modeset_driver_remove() calling us out of sequence.
+ * intel_display_driver_remove() calling us out of sequence.
* Would be nice if it didn't do that...
*/
if (!dev_priv->irq_enabled)
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 03ee4c8b1ed3..dd47e473ba4f 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -9,7 +9,7 @@
#include <linux/ktime.h>
#include <linux/types.h>
-#include "i915_reg.h"
+#include "i915_reg_defs.h"
enum pipe;
struct drm_crtc;
@@ -17,6 +17,7 @@ struct drm_device;
struct drm_display_mode;
struct drm_i915_private;
struct intel_crtc;
+struct intel_encoder;
struct intel_uncore;
void intel_irq_init(struct drm_i915_private *dev_priv);
@@ -37,6 +38,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
+void intel_hpd_enable_detection(struct intel_encoder *encoder);
void intel_hpd_irq_setup(struct drm_i915_private *i915);
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
u32 mask,
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 2a012da8ccfa..5fcc9cfed671 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -27,6 +27,7 @@
#include <drm/i915_pciids.h>
#include "display/intel_display.h"
+#include "display/intel_display_driver.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_sa_media.h"
@@ -1148,6 +1149,7 @@ static const struct intel_device_info mtl_info = {
.has_flat_ccs = 0,
.has_gmd_id = 1,
.has_guc_deprivilege = 1,
+ .has_llc = 0,
.has_mslice_steering = 0,
.has_snoop = 1,
.__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
@@ -1356,7 +1358,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENXIO;
/* Detect if we need to wait for other drivers early on */
- if (intel_modeset_probe_defer(pdev))
+ if (intel_display_driver_probe_defer(pdev))
return -EPROBE_DEFER;
err = i915_driver_probe(pdev, ent);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c4197e31962e..f82b2b245b0a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1343,9 +1343,9 @@
#define SNB_FBC_FRONT_BUFFER REG_BIT(1)
#define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000)
-#define ILK_FBCQ_DIS (1 << 22)
-#define ILK_PABSTRETCH_DIS REG_BIT(21)
-#define ILK_SABSTRETCH_DIS REG_BIT(20)
+#define ILK_FBCQ_DIS REG_BIT(22)
+#define ILK_PABSTRETCH_DIS REG_BIT(21)
+#define ILK_SABSTRETCH_DIS REG_BIT(20)
#define IVB_PRI_STRETCH_MAX_MASK REG_GENMASK(21, 20)
#define IVB_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 0)
#define IVB_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(IVB_PRI_STRETCH_MAX_MASK, 1)
@@ -1809,6 +1809,11 @@
#define CLKGATE_DIS_PSL_EXT(pipe) \
_MMIO_PIPE(pipe, _CLKGATE_DIS_PSL_EXT_A, _CLKGATE_DIS_PSL_EXT_B)
+/* DDI Buffer Control */
+#define _DDI_CLK_VALFREQ_A 0x64030
+#define _DDI_CLK_VALFREQ_B 0x64130
+#define DDI_CLK_VALFREQ(port) _MMIO_PORT(port, _DDI_CLK_VALFREQ_A, _DDI_CLK_VALFREQ_B)
+
/*
* Display engine regs
*/
@@ -2332,35 +2337,33 @@
/* Panel fitting */
#define PFIT_CONTROL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
-#define PFIT_ENABLE (1 << 31)
-#define PFIT_PIPE_MASK (3 << 29)
-#define PFIT_PIPE_SHIFT 29
-#define PFIT_PIPE(pipe) ((pipe) << 29)
-#define VERT_INTERP_DISABLE (0 << 10)
-#define VERT_INTERP_BILINEAR (1 << 10)
-#define VERT_INTERP_MASK (3 << 10)
-#define VERT_AUTO_SCALE (1 << 9)
-#define HORIZ_INTERP_DISABLE (0 << 6)
-#define HORIZ_INTERP_BILINEAR (1 << 6)
-#define HORIZ_INTERP_MASK (3 << 6)
-#define HORIZ_AUTO_SCALE (1 << 5)
-#define PANEL_8TO6_DITHER_ENABLE (1 << 3)
-#define PFIT_FILTER_FUZZY (0 << 24)
-#define PFIT_SCALING_AUTO (0 << 26)
-#define PFIT_SCALING_PROGRAMMED (1 << 26)
-#define PFIT_SCALING_PILLAR (2 << 26)
-#define PFIT_SCALING_LETTER (3 << 26)
+#define PFIT_ENABLE REG_BIT(31)
+#define PFIT_PIPE_MASK REG_GENMASK(30, 29) /* 965+ */
+#define PFIT_PIPE(pipe) REG_FIELD_PREP(PFIT_PIPE_MASK, (pipe))
+#define PFIT_SCALING_MASK REG_GENMASK(28, 26) /* 965+ */
+#define PFIT_SCALING_AUTO REG_FIELD_PREP(PFIT_SCALING_MASK, 0)
+#define PFIT_SCALING_PROGRAMMED REG_FIELD_PREP(PFIT_SCALING_MASK, 1)
+#define PFIT_SCALING_PILLAR REG_FIELD_PREP(PFIT_SCALING_MASK, 2)
+#define PFIT_SCALING_LETTER REG_FIELD_PREP(PFIT_SCALING_MASK, 3)
+#define PFIT_FILTER_MASK REG_GENMASK(25, 24) /* 965+ */
+#define PFIT_FILTER_FUZZY REG_FIELD_PREP(PFIT_FILTER_MASK, 0)
+#define PFIT_FILTER_CRISP REG_FIELD_PREP(PFIT_FILTER_MASK, 1)
+#define PFIT_FILTER_MEDIAN REG_FIELD_PREP(PFIT_FILTER_MASK, 2)
+#define PFIT_VERT_INTERP_MASK REG_GENMASK(11, 10) /* pre-965 */
+#define PFIT_VERT_INTERP_BILINEAR REG_FIELD_PREP(PFIT_VERT_INTERP_MASK, 1)
+#define PFIT_VERT_AUTO_SCALE REG_BIT(9) /* pre-965 */
+#define PFIT_HORIZ_INTERP_MASK REG_GENMASK(7, 6) /* pre-965 */
+#define PFIT_HORIZ_INTERP_BILINEAR REG_FIELD_PREP(PFIT_HORIZ_INTERP_MASK, 1)
+#define PFIT_HORIZ_AUTO_SCALE REG_BIT(5) /* pre-965 */
+#define PFIT_PANEL_8TO6_DITHER_ENABLE REG_BIT(3) /* pre-965 */
+
#define PFIT_PGM_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61234)
-/* Pre-965 */
-#define PFIT_VERT_SCALE_SHIFT 20
-#define PFIT_VERT_SCALE_MASK 0xfff00000
-#define PFIT_HORIZ_SCALE_SHIFT 4
-#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
-/* 965+ */
-#define PFIT_VERT_SCALE_SHIFT_965 16
-#define PFIT_VERT_SCALE_MASK_965 0x1fff0000
-#define PFIT_HORIZ_SCALE_SHIFT_965 0
-#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
+#define PFIT_VERT_SCALE_MASK REG_GENMASK(31, 20) /* pre-965 */
+#define PFIT_VERT_SCALE(x) REG_FIELD_PREP(PFIT_VERT_SCALE_MASK, (x))
+#define PFIT_HORIZ_SCALE_MASK REG_GENMASK(15, 4) /* pre-965 */
+#define PFIT_HORIZ_SCALE(x) REG_FIELD_PREP(PFIT_HORIZ_SCALE_MASK, (x))
+#define PFIT_VERT_SCALE_MASK_965 REG_GENMASK(28, 16) /* 965+ */
+#define PFIT_HORIZ_SCALE_MASK_965 REG_GENMASK(12, 0) /* 965+ */
#define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238)
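The PFIT conversion is mechanical: REG_FIELD_PREP(REG_GENMASK(h, l), v)
produces the same constants as the old (v << l) spellings. A standalone
spot-check with simplified userspace equivalents of the i915_reg_defs.h
helpers (the kernel versions add compile-time type/range checks):

    #include <assert.h>
    #include <stdint.h>

    #define REG_BIT(b)              (1u << (b))
    #define REG_GENMASK(h, l)       (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define REG_FIELD_PREP(mask, v) (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

    int main(void)
    {
            assert(REG_BIT(31) == (1u << 31));                   /* PFIT_ENABLE */
            assert(REG_FIELD_PREP(REG_GENMASK(28, 26), 2) ==
                   (2u << 26));                                  /* PFIT_SCALING_PILLAR */
            assert(REG_GENMASK(31, 20) == 0xfff00000);           /* PFIT_VERT_SCALE_MASK */
            return 0;
    }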
@@ -2655,8 +2658,13 @@
#define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */
#define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */
#define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */
+#define PIPE_MISC_PSR_MASK_PRIMARY_FLIP REG_BIT(23) /* bdw */
+#define PIPE_MISC_PSR_MASK_SPRITE_ENABLE REG_BIT(22) /* bdw */
+#define PIPE_MISC_PSR_MASK_PIPE_REG_WRITE REG_BIT(21) /* skl+ */
+#define PIPE_MISC_PSR_MASK_CURSOR_MOVE REG_BIT(21) /* bdw */
+#define PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT REG_BIT(20)
#define PIPE_MISC_OUTPUT_COLORSPACE_YUV REG_BIT(11)
-#define PIPE_MISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
+#define PIPE_MISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
/*
* For Display < 13, Bits 5-7 of PIPE MISC represent DITHER BPC with
* valid values of: 6, 8, 10 BPC.
@@ -4005,20 +4013,28 @@
/* CPU panel fitter */
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
-#define _PFA_CTL_1 0x68080
-#define _PFB_CTL_1 0x68880
-#define PF_ENABLE (1 << 31)
-#define PF_PIPE_SEL_MASK_IVB (3 << 29)
-#define PF_PIPE_SEL_IVB(pipe) ((pipe) << 29)
-#define PF_FILTER_MASK (3 << 23)
-#define PF_FILTER_PROGRAMMED (0 << 23)
-#define PF_FILTER_MED_3x3 (1 << 23)
-#define PF_FILTER_EDGE_ENHANCE (2 << 23)
-#define PF_FILTER_EDGE_SOFTEN (3 << 23)
+#define _PFA_CTL_1 0x68080
+#define _PFB_CTL_1 0x68880
+#define PF_ENABLE REG_BIT(31)
+#define PF_PIPE_SEL_MASK_IVB REG_GENMASK(30, 29) /* ivb/hsw */
+#define PF_PIPE_SEL_IVB(pipe) REG_FIELD_PREP(PF_PIPE_SEL_MASK_IVB, (pipe))
+#define PF_FILTER_MASK REG_GENMASK(24, 23)
+#define PF_FILTER_PROGRAMMED REG_FIELD_PREP(PF_FILTER_MASK, 0)
+#define PF_FILTER_MED_3x3 REG_FIELD_PREP(PF_FILTER_MASK, 1)
+#define PF_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PF_FILTER_MASK, 2)
+#define PF_FILTER_EDGE_SOFTEN REG_FIELD_PREP(PF_FILTER_MASK, 3)
#define _PFA_WIN_SZ 0x68074
#define _PFB_WIN_SZ 0x68874
+#define PF_WIN_XSIZE_MASK REG_GENMASK(31, 16)
+#define PF_WIN_XSIZE(w) REG_FIELD_PREP(PF_WIN_XSIZE_MASK, (w))
+#define PF_WIN_YSIZE_MASK REG_GENMASK(15, 0)
+#define PF_WIN_YSIZE(h) REG_FIELD_PREP(PF_WIN_YSIZE_MASK, (h))
#define _PFA_WIN_POS 0x68070
#define _PFB_WIN_POS 0x68870
+#define PF_WIN_XPOS_MASK REG_GENMASK(31, 16)
+#define PF_WIN_XPOS(x) REG_FIELD_PREP(PF_WIN_XPOS_MASK, (x))
+#define PF_WIN_YPOS_MASK REG_GENMASK(15, 0)
+#define PF_WIN_YPOS(y) REG_FIELD_PREP(PF_WIN_YPOS_MASK, (y))
#define _PFA_VSCALE 0x68084
#define _PFB_VSCALE 0x68884
#define _PFA_HSCALE 0x68090
@@ -4030,18 +4046,6 @@
#define PF_VSCALE(pipe) _MMIO_PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
#define PF_HSCALE(pipe) _MMIO_PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
-#define _PSA_CTL 0x68180
-#define _PSB_CTL 0x68980
-#define PS_ENABLE (1 << 31)
-#define _PSA_WIN_SZ 0x68174
-#define _PSB_WIN_SZ 0x68974
-#define _PSA_WIN_POS 0x68170
-#define _PSB_WIN_POS 0x68970
-
-#define PS_CTL(pipe) _MMIO_PIPE(pipe, _PSA_CTL, _PSB_CTL)
-#define PS_WIN_SZ(pipe) _MMIO_PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
-#define PS_WIN_POS(pipe) _MMIO_PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
-
/*
* Skylake scalers
*/
@@ -4050,63 +4054,78 @@
#define _PS_1B_CTRL 0x68980
#define _PS_2B_CTRL 0x68A80
#define _PS_1C_CTRL 0x69180
-#define PS_SCALER_EN (1 << 31)
-#define SKL_PS_SCALER_MODE_MASK (3 << 28)
-#define SKL_PS_SCALER_MODE_DYN (0 << 28)
-#define SKL_PS_SCALER_MODE_HQ (1 << 28)
-#define SKL_PS_SCALER_MODE_NV12 (2 << 28)
-#define PS_SCALER_MODE_PLANAR (1 << 29)
-#define PS_SCALER_MODE_NORMAL (0 << 29)
-#define PS_PLANE_SEL_MASK (7 << 25)
-#define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
-#define PS_FILTER_MASK (3 << 23)
-#define PS_FILTER_MEDIUM (0 << 23)
-#define PS_FILTER_PROGRAMMED (1 << 23)
-#define PS_FILTER_EDGE_ENHANCE (2 << 23)
-#define PS_FILTER_BILINEAR (3 << 23)
-#define PS_VERT3TAP (1 << 21)
-#define PS_VERT_INT_INVERT_FIELD1 (0 << 20)
-#define PS_VERT_INT_INVERT_FIELD0 (1 << 20)
-#define PS_PWRUP_PROGRESS (1 << 17)
-#define PS_V_FILTER_BYPASS (1 << 8)
-#define PS_VADAPT_EN (1 << 7)
-#define PS_VADAPT_MODE_MASK (3 << 5)
-#define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5)
-#define PS_VADAPT_MODE_MOD_ADAPT (1 << 5)
-#define PS_VADAPT_MODE_MOST_ADAPT (3 << 5)
-#define PS_PLANE_Y_SEL_MASK (7 << 5)
-#define PS_PLANE_Y_SEL(plane) (((plane) + 1) << 5)
-#define PS_Y_VERT_FILTER_SELECT(set) ((set) << 4)
-#define PS_Y_HORZ_FILTER_SELECT(set) ((set) << 3)
-#define PS_UV_VERT_FILTER_SELECT(set) ((set) << 2)
-#define PS_UV_HORZ_FILTER_SELECT(set) ((set) << 1)
+#define PS_SCALER_EN REG_BIT(31)
+#define SKL_PS_SCALER_MODE_MASK REG_GENMASK(29, 28) /* skl/bxt */
+#define SKL_PS_SCALER_MODE_DYN REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 0)
+#define SKL_PS_SCALER_MODE_HQ REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 1)
+#define SKL_PS_SCALER_MODE_NV12 REG_FIELD_PREP(SKL_PS_SCALER_MODE_MASK, 2)
+#define PS_SCALER_MODE_MASK REG_BIT(29) /* glk-tgl */
+#define PS_SCALER_MODE_NORMAL REG_FIELD_PREP(PS_SCALER_MODE_MASK, 0)
+#define PS_SCALER_MODE_PLANAR REG_FIELD_PREP(PS_SCALER_MODE_MASK, 1)
+#define PS_BINDING_MASK REG_GENMASK(27, 25)
+#define PS_BINDING_PIPE REG_FIELD_PREP(PS_BINDING_MASK, 0)
+#define PS_BINDING_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_MASK, (plane_id) + 1)
+#define PS_FILTER_MASK REG_GENMASK(24, 23)
+#define PS_FILTER_MEDIUM REG_FIELD_PREP(PS_FILTER_MASK, 0)
+#define PS_FILTER_PROGRAMMED REG_FIELD_PREP(PS_FILTER_MASK, 1)
+#define PS_FILTER_EDGE_ENHANCE REG_FIELD_PREP(PS_FILTER_MASK, 2)
+#define PS_FILTER_BILINEAR REG_FIELD_PREP(PS_FILTER_MASK, 3)
+#define PS_VERT3TAP REG_BIT(21) /* skl/bxt */
+#define PS_VERT_INT_INVERT_FIELD REG_BIT(20)
+#define PS_PWRUP_PROGRESS REG_BIT(17)
+#define PS_V_FILTER_BYPASS REG_BIT(8)
+#define PS_VADAPT_EN REG_BIT(7) /* skl/bxt */
+#define PS_VADAPT_MODE_MASK REG_GENMASK(6, 5) /* skl/bxt */
+#define PS_VADAPT_MODE_LEAST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 0)
+#define PS_VADAPT_MODE_MOD_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 1)
+#define PS_VADAPT_MODE_MOST_ADAPT REG_FIELD_PREP(PS_VADAPT_MODE_MASK, 3)
+#define PS_BINDING_Y_MASK REG_GENMASK(7, 5) /* icl-tgl */
+#define PS_BINDING_Y_PLANE(plane_id) REG_FIELD_PREP(PS_BINDING_Y_MASK, (plane_id) + 1)
+#define PS_Y_VERT_FILTER_SELECT_MASK REG_BIT(4) /* glk+ */
+#define PS_Y_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_VERT_FILTER_SELECT_MASK, (set))
+#define PS_Y_HORZ_FILTER_SELECT_MASK REG_BIT(3) /* glk+ */
+#define PS_Y_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_Y_HORZ_FILTER_SELECT_MASK, (set))
+#define PS_UV_VERT_FILTER_SELECT_MASK REG_BIT(2) /* glk+ */
+#define PS_UV_VERT_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_VERT_FILTER_SELECT_MASK, (set))
+#define PS_UV_HORZ_FILTER_SELECT_MASK REG_BIT(1) /* glk+ */
+#define PS_UV_HORZ_FILTER_SELECT(set) REG_FIELD_PREP(PS_UV_HORZ_FILTER_SELECT_MASK, (set))
#define _PS_PWR_GATE_1A 0x68160
#define _PS_PWR_GATE_2A 0x68260
#define _PS_PWR_GATE_1B 0x68960
#define _PS_PWR_GATE_2B 0x68A60
#define _PS_PWR_GATE_1C 0x69160
-#define PS_PWR_GATE_DIS_OVERRIDE (1 << 31)
-#define PS_PWR_GATE_SETTLING_TIME_32 (0 << 3)
-#define PS_PWR_GATE_SETTLING_TIME_64 (1 << 3)
-#define PS_PWR_GATE_SETTLING_TIME_96 (2 << 3)
-#define PS_PWR_GATE_SETTLING_TIME_128 (3 << 3)
-#define PS_PWR_GATE_SLPEN_8 0
-#define PS_PWR_GATE_SLPEN_16 1
-#define PS_PWR_GATE_SLPEN_24 2
-#define PS_PWR_GATE_SLPEN_32 3
+#define PS_PWR_GATE_DIS_OVERRIDE REG_BIT(31)
+#define PS_PWR_GATE_SETTLING_TIME_MASK REG_GENMASK(4, 3)
+#define PS_PWR_GATE_SETTLING_TIME_32 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 0)
+#define PS_PWR_GATE_SETTLING_TIME_64 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 1)
+#define PS_PWR_GATE_SETTLING_TIME_96 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 2)
+#define PS_PWR_GATE_SETTLING_TIME_128 REG_FIELD_PREP(PS_PWR_GATE_SETTLING_TIME_MASK, 3)
+#define PS_PWR_GATE_SLPEN_MASK REG_GENMASK(1, 0)
+#define PS_PWR_GATE_SLPEN_8 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 0)
+#define PS_PWR_GATE_SLPEN_16 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 1)
+#define PS_PWR_GATE_SLPEN_24 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 2)
+#define PS_PWR_GATE_SLPEN_32 REG_FIELD_PREP(PS_PWR_GATE_SLPEN_MASK, 3)
#define _PS_WIN_POS_1A 0x68170
#define _PS_WIN_POS_2A 0x68270
#define _PS_WIN_POS_1B 0x68970
#define _PS_WIN_POS_2B 0x68A70
#define _PS_WIN_POS_1C 0x69170
+#define PS_WIN_XPOS_MASK REG_GENMASK(31, 16)
+#define PS_WIN_XPOS(x) REG_FIELD_PREP(PS_WIN_XPOS_MASK, (x))
+#define PS_WIN_YPOS_MASK REG_GENMASK(15, 0)
+#define PS_WIN_YPOS(y) REG_FIELD_PREP(PS_WIN_YPOS_MASK, (y))
#define _PS_WIN_SZ_1A 0x68174
#define _PS_WIN_SZ_2A 0x68274
#define _PS_WIN_SZ_1B 0x68974
#define _PS_WIN_SZ_2B 0x68A74
#define _PS_WIN_SZ_1C 0x69174
+#define PS_WIN_XSIZE_MASK REG_GENMASK(31, 16)
+#define PS_WIN_XSIZE(w) REG_FIELD_PREP(PS_WIN_XSIZE_MASK, (w))
+#define PS_WIN_YSIZE_MASK REG_GENMASK(15, 0)
+#define PS_WIN_YSIZE(h) REG_FIELD_PREP(PS_WIN_YSIZE_MASK, (h))
#define _PS_VSCALE_1A 0x68184
#define _PS_VSCALE_2A 0x68284
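With explicit PS_WIN_* field helpers, scaler window programming becomes
self-describing. A fragment of how a caller composes the values (sketch; the
SKL_PS_WIN_POS()/SKL_PS_WIN_SZ() accessors and the local variables are from
the scaler code, not this hunk):

    intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
                      PS_WIN_XPOS(crtc_x) | PS_WIN_YPOS(crtc_y));
    intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
                      PS_WIN_XSIZE(crtc_w) | PS_WIN_YSIZE(crtc_h));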
@@ -4125,10 +4144,12 @@
#define _PS_VPHASE_1B 0x68988
#define _PS_VPHASE_2B 0x68A88
#define _PS_VPHASE_1C 0x69188
-#define PS_Y_PHASE(x) ((x) << 16)
-#define PS_UV_RGB_PHASE(x) ((x) << 0)
-#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */
-#define PS_PHASE_TRIP (1 << 0)
+#define PS_Y_PHASE_MASK REG_GENMASK(31, 16)
+#define PS_Y_PHASE(x) REG_FIELD_PREP(PS_Y_PHASE_MASK, (x))
+#define PS_UV_RGB_PHASE_MASK REG_GENMASK(15, 0)
+#define PS_UV_RGB_PHASE(x) REG_FIELD_PREP(PS_UV_RGB_PHASE_MASK, (x))
+#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */
+#define PS_PHASE_TRIP (1 << 0)
#define _PS_HPHASE_1A 0x68194
#define _PS_HPHASE_2A 0x68294
@@ -4146,7 +4167,7 @@
#define _PS_COEF_SET0_INDEX_2A 0x68298
#define _PS_COEF_SET0_INDEX_1B 0x68998
#define _PS_COEF_SET0_INDEX_2B 0x68A98
-#define PS_COEE_INDEX_AUTO_INC (1 << 10)
+#define PS_COEF_INDEX_AUTO_INC REG_BIT(10)
#define _PS_COEF_SET0_DATA_1A 0x6819C
#define _PS_COEF_SET0_DATA_2A 0x6829C
@@ -4482,56 +4503,79 @@
#define GEN11_HOTPLUG_CTL_SHORT_DETECT(hpd_pin) (1 << (_HPD_PIN_TC(hpd_pin) * 4))
#define GEN11_HOTPLUG_CTL_NO_DETECT(hpd_pin) (0 << (_HPD_PIN_TC(hpd_pin) * 4))
+#define PICAINTERRUPT_ISR _MMIO(0x16FE50)
+#define PICAINTERRUPT_IMR _MMIO(0x16FE54)
+#define PICAINTERRUPT_IIR _MMIO(0x16FE58)
+#define PICAINTERRUPT_IER _MMIO(0x16FE5C)
+
+#define XELPDP_DP_ALT_HOTPLUG(hpd_pin) REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
+#define XELPDP_DP_ALT_HOTPLUG_MASK REG_GENMASK(19, 16)
+
+#define XELPDP_AUX_TC(hpd_pin) REG_BIT(8 + _HPD_PIN_TC(hpd_pin))
+#define XELPDP_AUX_TC_MASK REG_GENMASK(11, 8)
+
+#define XELPDP_TBT_HOTPLUG(hpd_pin) REG_BIT(_HPD_PIN_TC(hpd_pin))
+#define XELPDP_TBT_HOTPLUG_MASK REG_GENMASK(3, 0)
+
+#define XELPDP_PORT_HOTPLUG_CTL(hpd_pin) _MMIO(0x16F270 + (_HPD_PIN_TC(hpd_pin) * 0x200))
+#define XELPDP_TBT_HOTPLUG_ENABLE REG_BIT(6)
+#define XELPDP_TBT_HPD_LONG_DETECT REG_BIT(5)
+#define XELPDP_TBT_HPD_SHORT_DETECT REG_BIT(4)
+#define XELPDP_DP_ALT_HOTPLUG_ENABLE REG_BIT(2)
+#define XELPDP_DP_ALT_HPD_LONG_DETECT REG_BIT(1)
+#define XELPDP_DP_ALT_HPD_SHORT_DETECT REG_BIT(0)
+
#define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004)
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
-#define ILK_ELPIN_409_SELECT (1 << 25)
-#define ILK_DPARB_GATE (1 << 22)
-#define ILK_VSDPFD_FULL (1 << 21)
-#define FUSE_STRAP _MMIO(0x42014)
-#define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
-#define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30)
-#define ILK_DISPLAY_DEBUG_DISABLE (1 << 29)
-#define IVB_PIPE_C_DISABLE (1 << 28)
-#define ILK_HDCP_DISABLE (1 << 25)
-#define ILK_eDP_A_DISABLE (1 << 24)
-#define HSW_CDCLK_LIMIT (1 << 24)
-#define ILK_DESKTOP (1 << 23)
-#define HSW_CPU_SSC_ENABLE (1 << 21)
-
-#define FUSE_STRAP3 _MMIO(0x42020)
-#define HSW_REF_CLK_SELECT (1 << 1)
-
-#define ILK_DSPCLK_GATE_D _MMIO(0x42020)
-#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
-#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
-#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
-#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
-#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
-
-#define IVB_CHICKEN3 _MMIO(0x4200c)
-# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
-# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
-
-#define CHICKEN_PAR1_1 _MMIO(0x42080)
-#define IGNORE_KVMR_PIPE_A REG_BIT(23)
-#define KBL_ARB_FILL_SPARE_22 REG_BIT(22)
-#define DIS_RAM_BYPASS_PSR2_MAN_TRACK (1 << 16)
-#define SKL_DE_COMPRESSED_HASH_MODE (1 << 15)
-#define DPA_MASK_VBLANK_SRD (1 << 15)
-#define FORCE_ARB_IDLE_PLANES (1 << 14)
-#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
-#define IGNORE_PSR2_HW_TRACKING (1 << 1)
+#define ILK_ELPIN_409_SELECT REG_BIT(25)
+#define ILK_DPARB_GATE REG_BIT(22)
+#define ILK_VSDPFD_FULL REG_BIT(21)
+
+#define FUSE_STRAP _MMIO(0x42014)
+#define ILK_INTERNAL_GRAPHICS_DISABLE REG_BIT(31)
+#define ILK_INTERNAL_DISPLAY_DISABLE REG_BIT(30)
+#define ILK_DISPLAY_DEBUG_DISABLE REG_BIT(29)
+#define IVB_PIPE_C_DISABLE REG_BIT(28)
+#define ILK_HDCP_DISABLE REG_BIT(25)
+#define ILK_eDP_A_DISABLE REG_BIT(24)
+#define HSW_CDCLK_LIMIT REG_BIT(24)
+#define ILK_DESKTOP REG_BIT(23)
+#define HSW_CPU_SSC_ENABLE REG_BIT(21)
+
+#define FUSE_STRAP3 _MMIO(0x42020)
+#define HSW_REF_CLK_SELECT REG_BIT(1)
+
+#define ILK_DSPCLK_GATE_D _MMIO(0x42020)
+#define ILK_VRHUNIT_CLOCK_GATE_DISABLE REG_BIT(28)
+#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE REG_BIT(9)
+#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE REG_BIT(8)
+#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE REG_BIT(7)
+#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE REG_BIT(5)
+
+#define IVB_CHICKEN3 _MMIO(0x4200c)
+#define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE REG_BIT(5)
+#define CHICKEN3_DGMG_DONE_FIX_DISABLE REG_BIT(2)
+
+#define CHICKEN_PAR1_1 _MMIO(0x42080)
+#define IGNORE_KVMR_PIPE_A REG_BIT(23)
+#define KBL_ARB_FILL_SPARE_22 REG_BIT(22)
+#define DIS_RAM_BYPASS_PSR2_MAN_TRACK REG_BIT(16)
+#define SKL_DE_COMPRESSED_HASH_MODE REG_BIT(15)
+#define HSW_MASK_VBL_TO_PIPE_IN_SRD REG_BIT(15) /* hsw/bdw */
+#define FORCE_ARB_IDLE_PLANES REG_BIT(14)
+#define SKL_EDP_PSR_FIX_RDWRAP REG_BIT(3)
+#define IGNORE_PSR2_HW_TRACKING REG_BIT(1)
#define CHICKEN_PAR2_1 _MMIO(0x42090)
-#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14)
+#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT REG_BIT(14)
#define CHICKEN_MISC_2 _MMIO(0x42084)
-#define CHICKEN_MISC_DISABLE_DPT REG_BIT(30) /* adl,dg2 */
-#define KBL_ARB_FILL_SPARE_14 REG_BIT(14)
-#define KBL_ARB_FILL_SPARE_13 REG_BIT(13)
-#define GLK_CL2_PWR_DOWN (1 << 12)
-#define GLK_CL1_PWR_DOWN (1 << 11)
-#define GLK_CL0_PWR_DOWN (1 << 10)
+#define CHICKEN_MISC_DISABLE_DPT REG_BIT(30) /* adl,dg2 */
+#define KBL_ARB_FILL_SPARE_14 REG_BIT(14)
+#define KBL_ARB_FILL_SPARE_13 REG_BIT(13)
+#define GLK_CL2_PWR_DOWN REG_BIT(12)
+#define GLK_CL1_PWR_DOWN REG_BIT(11)
+#define GLK_CL0_PWR_DOWN REG_BIT(10)
#define CHICKEN_MISC_4 _MMIO(0x4208c)
#define CHICKEN_FBC_STRIDE_OVERRIDE REG_BIT(13)
@@ -4540,24 +4584,26 @@
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
-#define HSW_PRI_STRETCH_MAX_MASK REG_GENMASK(28, 27)
-#define HSW_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 0)
-#define HSW_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 1)
-#define HSW_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 2)
-#define HSW_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 3)
-#define HSW_SPR_STRETCH_MAX_MASK REG_GENMASK(26, 25)
-#define HSW_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 0)
-#define HSW_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 1)
-#define HSW_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 2)
-#define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3)
-#define HSW_FBCQ_DIS (1 << 22)
-#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
-#define SKL_PLANE1_STRETCH_MAX_MASK REG_GENMASK(1, 0)
-#define SKL_PLANE1_STRETCH_MAX_X8 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 0)
-#define SKL_PLANE1_STRETCH_MAX_X4 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 1)
-#define SKL_PLANE1_STRETCH_MAX_X2 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 2)
-#define SKL_PLANE1_STRETCH_MAX_X1 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3)
-#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
+#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
+#define HSW_PRI_STRETCH_MAX_MASK REG_GENMASK(28, 27)
+#define HSW_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 0)
+#define HSW_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 1)
+#define HSW_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 2)
+#define HSW_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 3)
+#define HSW_SPR_STRETCH_MAX_MASK REG_GENMASK(26, 25)
+#define HSW_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 0)
+#define HSW_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 1)
+#define HSW_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 2)
+#define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3)
+#define HSW_FBCQ_DIS REG_BIT(22)
+#define HSW_UNMASK_VBL_TO_REGS_IN_SRD REG_BIT(15) /* hsw */
+#define SKL_PSR_MASK_PLANE_FLIP REG_BIT(11) /* skl+ */
+#define SKL_PLANE1_STRETCH_MAX_MASK REG_GENMASK(1, 0)
+#define SKL_PLANE1_STRETCH_MAX_X8 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 0)
+#define SKL_PLANE1_STRETCH_MAX_X4 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 1)
+#define SKL_PLANE1_STRETCH_MAX_X2 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 2)
+#define SKL_PLANE1_STRETCH_MAX_X1 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3)
+#define BDW_UNMASK_VBL_TO_REGS_IN_SRD REG_BIT(0) /* bdw */
#define _CHICKEN_TRANS_A 0x420c0
#define _CHICKEN_TRANS_B 0x420c4
@@ -4570,32 +4616,33 @@
[TRANSCODER_B] = _CHICKEN_TRANS_B, \
[TRANSCODER_C] = _CHICKEN_TRANS_C, \
[TRANSCODER_D] = _CHICKEN_TRANS_D))
-
#define _MTL_CHICKEN_TRANS_A 0x604e0
#define _MTL_CHICKEN_TRANS_B 0x614e0
#define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
_MTL_CHICKEN_TRANS_A, \
_MTL_CHICKEN_TRANS_B)
-
-#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
-#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
-#define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */
-#define FECSTALL_DIS_DPTSTREAM_DPTTG REG_BIT(23)
-#define DDI_TRAINING_OVERRIDE_ENABLE REG_BIT(19)
-#define ADLP_1_BASED_X_GRANULARITY REG_BIT(18)
-#define DDI_TRAINING_OVERRIDE_VALUE REG_BIT(18)
-#define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */
-#define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */
-#define PSR2_ADD_VERTICAL_LINE_COUNT REG_BIT(15)
-#define PSR2_VSC_ENABLE_PROG_HEADER REG_BIT(12)
+#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* ADL/DG2 */
+#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
+#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
+#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
+#define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */
+#define FECSTALL_DIS_DPTSTREAM_DPTTG REG_BIT(23)
+#define DDI_TRAINING_OVERRIDE_ENABLE REG_BIT(19)
+#define ADLP_1_BASED_X_GRANULARITY REG_BIT(18)
+#define DDI_TRAINING_OVERRIDE_VALUE REG_BIT(18)
+#define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */
+#define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */
+#define PSR2_ADD_VERTICAL_LINE_COUNT REG_BIT(15)
+#define PSR2_VSC_ENABLE_PROG_HEADER REG_BIT(12)
#define DISP_ARB_CTL _MMIO(0x45000)
-#define DISP_FBC_MEMORY_WAKE (1 << 31)
-#define DISP_TILE_SURFACE_SWIZZLING (1 << 13)
-#define DISP_FBC_WM_DIS (1 << 15)
+#define DISP_FBC_MEMORY_WAKE REG_BIT(31)
+#define DISP_TILE_SURFACE_SWIZZLING REG_BIT(13)
+#define DISP_FBC_WM_DIS REG_BIT(15)
+
#define DISP_ARB_CTL2 _MMIO(0x45004)
-#define DISP_DATA_PARTITION_5_6 (1 << 6)
-#define DISP_IPC_ENABLE (1 << 3)
+#define DISP_DATA_PARTITION_5_6 REG_BIT(6)
+#define DISP_IPC_ENABLE REG_BIT(3)
#define GEN7_MSG_CTL _MMIO(0x45010)
#define WAIT_FOR_PCH_RESET_ACK (1 << 1)
@@ -4768,7 +4815,8 @@
SDE_FDI_RXB_CPT | \
SDE_FDI_RXA_CPT)
-/* south display engine interrupt: ICP/TGP */
+/* south display engine interrupt: ICP/TGP/MTP */
+#define SDE_PICAINTERRUPT REG_BIT(31)
#define SDE_GMBUS_ICP (1 << 23)
#define SDE_TC_HOTPLUG_ICP(hpd_pin) REG_BIT(24 + _HPD_PIN_TC(hpd_pin))
#define SDE_TC_HOTPLUG_DG2(hpd_pin) REG_BIT(25 + _HPD_PIN_TC(hpd_pin)) /* sigh */
@@ -5104,24 +5152,32 @@
#define TRANS_BPC_10 REG_FIELD_PREP(TRANS_BPC_MASK, 1)
#define TRANS_BPC_6 REG_FIELD_PREP(TRANS_BPC_MASK, 2)
#define TRANS_BPC_12 REG_FIELD_PREP(TRANS_BPC_MASK, 3)
+
#define _TRANSA_CHICKEN1 0xf0060
#define _TRANSB_CHICKEN1 0xf1060
#define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
-#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1 << 10)
-#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1 << 4)
+#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE REG_BIT(10)
+#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE REG_BIT(4)
+
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define TRANS_CHICKEN2_TIMING_OVERRIDE (1 << 31)
-#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1 << 29)
-#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3 << 27)
-#define TRANS_CHICKEN2_FRAME_START_DELAY(x) ((x) << 27) /* 0-3 */
-#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1 << 26)
-#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1 << 25)
+#define TRANS_CHICKEN2_TIMING_OVERRIDE REG_BIT(31)
+#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED REG_BIT(29)
+#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
+#define TRANS_CHICKEN2_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANS_CHICKEN2_FRAME_START_DELAY_MASK, (x)) /* 0-3 */
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER REG_BIT(26)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH REG_BIT(25)
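Note that the FRAME_START_DELAY conversion also swaps the open-coded (x) << 27 for REG_FIELD_PREP(), which additionally verifies at build time that the value fits the 28:27 mask. A sketch of the equivalence (illustrative only):

static inline u32 example_frame_start_delay(void)
{
	BUILD_BUG_ON(TRANS_CHICKEN2_FRAME_START_DELAY(2) != (u32)(2 << 27));
	return TRANS_CHICKEN2_FRAME_START_DELAY(2);
}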
#define SOUTH_CHICKEN1 _MMIO(0xc2000)
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
#define FDIA_PHASE_SYNC_SHIFT_EN 18
+#define INVERT_DDIE_HPD REG_BIT(28)
+#define INVERT_DDID_HPD_MTP REG_BIT(27)
+#define INVERT_TC4_HPD REG_BIT(26)
+#define INVERT_TC3_HPD REG_BIT(25)
+#define INVERT_TC2_HPD REG_BIT(24)
+#define INVERT_TC1_HPD REG_BIT(23)
#define INVERT_DDID_HPD (1 << 18)
#define INVERT_DDIC_HPD (1 << 17)
#define INVERT_DDIB_HPD (1 << 16)
@@ -5303,6 +5359,20 @@
#define ICL_PCODE_MEM_SS_READ_GLOBAL_INFO (0x0 << 8)
#define ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point) (((point) << 16) | (0x1 << 8))
#define ADL_PCODE_MEM_SS_READ_PSF_GV_INFO ((0) | (0x2 << 8))
+#define DISPLAY_TO_PCODE_CDCLK_MAX 0x28D
+#define DISPLAY_TO_PCODE_VOLTAGE_MASK REG_GENMASK(1, 0)
+#define DISPLAY_TO_PCODE_VOLTAGE_MAX DISPLAY_TO_PCODE_VOLTAGE_MASK
+#define DISPLAY_TO_PCODE_CDCLK_VALID REG_BIT(27)
+#define DISPLAY_TO_PCODE_PIPE_COUNT_VALID REG_BIT(31)
+#define DISPLAY_TO_PCODE_CDCLK_MASK REG_GENMASK(25, 16)
+#define DISPLAY_TO_PCODE_PIPE_COUNT_MASK REG_GENMASK(30, 28)
+#define DISPLAY_TO_PCODE_CDCLK(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_CDCLK_MASK, (x))
+#define DISPLAY_TO_PCODE_PIPE_COUNT(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_PIPE_COUNT_MASK, (x))
+#define DISPLAY_TO_PCODE_VOLTAGE(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_VOLTAGE_MASK, (x))
+#define DISPLAY_TO_PCODE_UPDATE_MASK(cdclk, num_pipes, voltage_level) \
+ ((DISPLAY_TO_PCODE_CDCLK(cdclk)) | \
+ (DISPLAY_TO_PCODE_PIPE_COUNT(num_pipes)) | \
+ (DISPLAY_TO_PCODE_VOLTAGE(voltage_level)))
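A worked composition of the new pcode mailbox word; the values (480 MHz CDCLK, two pipes, voltage level 1) are illustrative, as no caller appears in this hunk:

static inline u32 example_pcode_update_word(void)
{
	/* 480 lands in bits 25:16, 2 in bits 30:28, 1 in bits 1:0 */
	return DISPLAY_TO_PCODE_UPDATE_MASK(480, 2, 1);
}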
#define ICL_PCODE_SAGV_DE_MEM_SS_CONFIG 0xe
#define ICL_PCODE_REP_QGV_MASK REG_GENMASK(1, 0)
#define ICL_PCODE_REP_QGV_SAFE REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 0)
@@ -5563,6 +5633,8 @@ enum skl_power_gate {
#define TRANS_DDI_HDCP_SELECT REG_BIT(5)
#define TRANS_DDI_BFI_ENABLE (1 << 4)
#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4)
+#define TRANS_DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
+#define TRANS_DDI_PORT_WIDTH(width) REG_FIELD_PREP(TRANS_DDI_PORT_WIDTH_MASK, (width) - 1)
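TRANS_DDI_PORT_WIDTH() stores the lane count minus one, matching the field encoding visible in the macro. Sketch (function name is illustrative):

static inline u32 example_port_width_x4(void)
{
	/* four lanes encode as 3 in bits 3:1, i.e. REG_FIELD_PREP(..., 4 - 1) */
	return TRANS_DDI_PORT_WIDTH(4);
}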
#define TRANS_DDI_HDMI_SCRAMBLING (1 << 0)
#define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
| TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
@@ -5622,11 +5694,16 @@ enum skl_power_gate {
/* DDI Buffer Control */
#define _DDI_BUF_CTL_A 0x64000
#define _DDI_BUF_CTL_B 0x64100
+/* Known as DDI_CTL_DE in MTL+ */
#define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1 << 31)
#define DDI_BUF_TRANS_SELECT(n) ((n) << 24)
#define DDI_BUF_EMP_MASK (0xf << 24)
#define DDI_BUF_PHY_LINK_RATE(r) ((r) << 20)
+#define DDI_BUF_PORT_DATA_MASK REG_GENMASK(19, 18)
+#define DDI_BUF_PORT_DATA_10BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 0)
+#define DDI_BUF_PORT_DATA_20BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 1)
+#define DDI_BUF_PORT_DATA_40BIT REG_FIELD_PREP(DDI_BUF_PORT_DATA_MASK, 2)
#define DDI_BUF_PORT_REVERSAL (1 << 16)
#define DDI_BUF_IS_IDLE (1 << 7)
#define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6)
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index db26de6b57bc..622d603080f9 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -23,6 +23,19 @@
((__n) < 0 || (__n) > 31))))
/**
+ * REG_BIT8() - Prepare a u8 bit value
+ * @__n: 0-based bit number
+ *
+ * Local wrapper for BIT() to force u8, with compile time checks.
+ *
+ * @return: Value with bit @__n set.
+ */
+#define REG_BIT8(__n) \
+ ((u8)(BIT(__n) + \
+ BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \
+ ((__n) < 0 || (__n) > 7))))
+
+/**
* REG_GENMASK() - Prepare a continuous u32 bitmask
* @__high: 0-based high bit
* @__low: 0-based low bit
@@ -52,6 +65,21 @@
__is_constexpr(__low) && \
((__low) < 0 || (__high) > 63 || (__low) > (__high)))))
+/**
+ * REG_GENMASK8() - Prepare a continuous u8 bitmask
+ * @__high: 0-based high bit
+ * @__low: 0-based low bit
+ *
+ * Local wrapper for GENMASK() to force u8, with compile time checks.
+ *
+ * @return: Continuous bitmask from @__high to @__low, inclusive.
+ */
+#define REG_GENMASK8(__high, __low) \
+ ((u8)(GENMASK(__high, __low) + \
+ BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \
+ __is_constexpr(__low) && \
+ ((__low) < 0 || (__high) > 7 || (__low) > (__high)))))
+
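A sketch of the new u8 helpers in use (bit numbers are arbitrary): they mirror REG_BIT()/REG_GENMASK() but force u8 and reject bit numbers above 7 at compile time, suiting 8-bit register payloads.

static inline u8 example_u8_bits(void)
{
	u8 enable = REG_BIT8(7);        /* 0x80 */
	u8 field  = REG_GENMASK8(4, 2); /* 0x1c */

	/* REG_BIT8(8) would fail to compile via BUILD_BUG_ON_ZERO(). */
	return enable | field;
}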
/*
* Local integer constant expression version of is_power_of_2().
*/
@@ -75,6 +103,23 @@
BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
/**
+ * REG_FIELD_PREP8() - Prepare a u8 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to put in the field
+ *
+ * Local copy of FIELD_PREP() to generate an integer constant expression, force
+ * u8 and for consistency with REG_FIELD_GET8(), REG_BIT8() and REG_GENMASK8().
+ *
+ * @return: @__val masked and shifted into the field defined by @__mask.
+ */
+#define REG_FIELD_PREP8(__mask, __val) \
+ ((u8)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
+ BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
+ BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U8_MAX) + \
+ BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
+
+/**
* REG_FIELD_GET() - Extract a u32 bitfield value
* @__mask: shifted mask defining the field's length and position
* @__val: value to extract the bitfield value from
@@ -155,6 +200,18 @@
*/
#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
+/**
+ * REG_FIELD_GET8() - Extract a u8 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u8 and for consistency with
+ * REG_FIELD_PREP8(), REG_BIT8() and REG_GENMASK8().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET8(__mask, __val) ((u8)FIELD_GET(__mask, __val))
+
typedef struct {
u32 reg;
} i915_reg_t;
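And the matching prep/get round trip for an 8-bit field, as a minimal sketch (mask and value are illustrative):

static inline u8 example_u8_field_roundtrip(void)
{
	u8 reg = REG_FIELD_PREP8(REG_GENMASK8(6, 4), 5); /* 5 << 4 == 0x50 */

	return REG_FIELD_GET8(REG_GENMASK8(6, 4), reg);  /* 5 again */
}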
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index 2c5302bcba19..a27600bc5976 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -36,6 +36,7 @@
#include "gt/intel_gt_regs.h"
#include "i915_drv.h"
+#include "i915_reg.h"
#include "intel_clock_gating.h"
#include "intel_mchbar_regs.h"
#include "vlv_sideband.h"
@@ -520,12 +521,12 @@ static void bdw_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_rmw(&i915->uncore, GAM_ECOCHK, 0, HSW_ECOCHK_ARB_PRIO_SOL);
/* WaPsrDPAMaskVBlankInSRD:bdw */
- intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, DPA_MASK_VBLANK_SRD);
+ intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, HSW_MASK_VBL_TO_PIPE_IN_SRD);
for_each_pipe(i915, pipe) {
/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(pipe),
- 0, BDW_DPRS_MASK_VBLANK_SRD);
+ 0, BDW_UNMASK_VBL_TO_REGS_IN_SRD);
}
/* WaVSRefCountFullforceMissDisable:bdw */
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index fc5cd14adfcc..4e23be2995bf 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -381,6 +381,13 @@ void intel_device_info_runtime_init_early(struct drm_i915_private *i915)
intel_device_info_subplatform_init(i915);
}
+/* FIXME: Remove this, and make device info a const pointer to rodata. */
+static struct intel_device_info *
+mkwrite_device_info(struct drm_i915_private *i915)
+{
+ return (struct intel_device_info *)INTEL_INFO(i915);
+}
+
/**
* intel_device_info_runtime_init - initialize runtime info
* @dev_priv: the i915 device
@@ -548,6 +555,28 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
}
+/*
+ * Set up device info and initial runtime info at driver create.
+ *
+ * Note: i915 is only an allocated blob of memory at this point.
+ */
+void intel_device_info_driver_create(struct drm_i915_private *i915,
+ u16 device_id,
+ const struct intel_device_info *match_info)
+{
+ struct intel_device_info *info;
+ struct intel_runtime_info *runtime;
+
+	/* Set up the write-once "constant" device info */
+ info = mkwrite_device_info(i915);
+ memcpy(info, match_info, sizeof(*info));
+
+	/* Seed the initial runtime info from static const data and pdev. */
+ runtime = RUNTIME_INFO(i915);
+ memcpy(runtime, &INTEL_INFO(i915)->__runtime, sizeof(*runtime));
+ runtime->device_id = device_id;
+}
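A sketch of how the new helper is presumably wired up at driver create time; the caller is not shown in this hunk, so the surrounding function, pdev and match_info names are assumptions:

static void example_driver_create(struct drm_i915_private *i915,
				  struct pci_dev *pdev,
				  const struct intel_device_info *match_info)
{
	/* i915 is still just an allocated blob here, per the comment above. */
	intel_device_info_driver_create(i915, pdev->device, match_info);
}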
+
void intel_driver_caps_print(const struct intel_driver_caps *caps,
struct drm_printer *p)
{
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 080a4557899b..f032f2500f50 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -317,6 +317,8 @@ struct intel_driver_caps {
const char *intel_platform_name(enum intel_platform platform);
+void intel_device_info_driver_create(struct drm_i915_private *i915, u16 device_id,
+ const struct intel_device_info *match_info);
void intel_device_info_runtime_init_early(struct drm_i915_private *dev_priv);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0012.h b/drivers/gpu/drm/nouveau/include/nvif/if0012.h
index eb99d84eb844..16d4ad5023a3 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0012.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0012.h
@@ -2,6 +2,8 @@
#ifndef __NVIF_IF0012_H__
#define __NVIF_IF0012_H__
+#include <drm/display/drm_dp.h>
+
union nvif_outp_args {
struct nvif_outp_v0 {
__u8 version;
@@ -63,7 +65,7 @@ union nvif_outp_acquire_args {
__u8 hda;
__u8 mst;
__u8 pad04[4];
- __u8 dpcd[16];
+ __u8 dpcd[DP_RECEIVER_CAP_SIZE];
} dp;
};
} v0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index b7631c1ab242..4e7f873f66e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -3,6 +3,7 @@
#define __NVKM_DISP_OUTP_H__
#include "priv.h"
+#include <drm/display/drm_dp.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/dp.h>
@@ -42,7 +43,7 @@ struct nvkm_outp {
bool aux_pwr_pu;
u8 lttpr[6];
u8 lttprs;
- u8 dpcd[16];
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
struct {
int dpcd; /* -1, or index into SUPPORTED_LINK_RATES table */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
index 4f0ca709c85a..fc283a4a1522 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
@@ -146,7 +146,7 @@ nvkm_uoutp_mthd_release(struct nvkm_outp *outp, void *argv, u32 argc)
}
static int
-nvkm_uoutp_mthd_acquire_dp(struct nvkm_outp *outp, u8 dpcd[16],
+nvkm_uoutp_mthd_acquire_dp(struct nvkm_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE],
u8 link_nr, u8 link_bw, bool hda, bool mst)
{
int ret;
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 8ef25ab305ae..b8f4dac68d85 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5517,6 +5517,7 @@ static int ci_parse_power_table(struct radeon_device *rdev)
u8 frev, crev;
u8 *power_state_offset;
struct ci_ps *ps;
+ int ret;
if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
@@ -5546,11 +5547,15 @@ static int ci_parse_power_table(struct radeon_device *rdev)
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
- if (!rdev->pm.power_state[i].clock_info)
- return -EINVAL;
+ if (!rdev->pm.power_state[i].clock_info) {
+ ret = -EINVAL;
+ goto err_free_ps;
+ }
ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
- if (ps == NULL)
- return -ENOMEM;
+ if (ps == NULL) {
+ ret = -ENOMEM;
+ goto err_free_ps;
+ }
rdev->pm.dpm.ps[i].ps_priv = ps;
ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
non_clock_info,
@@ -5590,6 +5595,12 @@ static int ci_parse_power_table(struct radeon_device *rdev)
}
return 0;
+
+err_free_ps:
+ for (i = 0; i < rdev->pm.dpm.num_ps; i++)
+ kfree(rdev->pm.dpm.ps[i].ps_priv);
+ kfree(rdev->pm.dpm.ps);
+ return ret;
}
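The fix follows the usual kernel unwind idiom: on failure mid-loop, free what earlier iterations allocated instead of returning and leaking it. The same shape in miniature, with illustrative types and names:

#include <linux/slab.h>

struct example_item { int dummy; };

static int example_parse(struct example_item **items, int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		items[i] = kzalloc(sizeof(**items), GFP_KERNEL);
		if (!items[i]) {
			ret = -ENOMEM;
			goto err_free;
		}
	}
	return 0;

err_free:
	while (--i >= 0)
		kfree(items[i]);
	return ret;
}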
static int ci_get_vbios_boot_values(struct radeon_device *rdev,
@@ -5678,25 +5689,26 @@ int ci_dpm_init(struct radeon_device *rdev)
ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
if (ret) {
- ci_dpm_fini(rdev);
+ kfree(rdev->pm.dpm.priv);
return ret;
}
ret = r600_get_platform_caps(rdev);
if (ret) {
- ci_dpm_fini(rdev);
+ kfree(rdev->pm.dpm.priv);
return ret;
}
ret = r600_parse_extended_power_table(rdev);
if (ret) {
- ci_dpm_fini(rdev);
+ kfree(rdev->pm.dpm.priv);
return ret;
}
ret = ci_parse_power_table(rdev);
if (ret) {
- ci_dpm_fini(rdev);
+ kfree(rdev->pm.dpm.priv);
+ r600_free_extended_power_table(rdev);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 46a27ebf4588..a6700d7278bf 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -270,7 +270,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
struct drm_radeon_cs *cs = data;
uint64_t *chunk_array_ptr;
- unsigned size, i;
+ u64 size;
+ unsigned i;
u32 ring = RADEON_CS_RING_GFX;
s32 priority = 0;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index ab725d9d14a6..feac2f2c736e 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -450,3 +450,9 @@ ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
+
+unsigned long ttm_tt_pages_limit(void)
+{
+ return ttm_pages_limit;
+}
+EXPORT_SYMBOL(ttm_tt_pages_limit);
diff --git a/drivers/greybus/connection.c b/drivers/greybus/connection.c
index e3799a53a193..9c88861986c8 100644
--- a/drivers/greybus/connection.c
+++ b/drivers/greybus/connection.c
@@ -187,8 +187,8 @@ _gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
spin_lock_init(&connection->lock);
INIT_LIST_HEAD(&connection->operations);
- connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
- dev_name(&hd->dev), hd_cport_id);
+ connection->wq = alloc_ordered_workqueue("%s:%d", 0, dev_name(&hd->dev),
+ hd_cport_id);
if (!connection->wq) {
ret = -ENOMEM;
goto err_free_connection;
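alloc_ordered_workqueue() is the documented replacement for an unbound workqueue created with max_active == 1: execution semantics are unchanged, the ordering guarantee just becomes explicit instead of a side effect. Sketch (function name is illustrative):

static struct workqueue_struct *example_make_wq(const char *name, int id)
{
	/* old: alloc_workqueue("%s:%d", WQ_UNBOUND, 1, name, id); */
	return alloc_ordered_workqueue("%s:%d", 0, name, id);
}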
diff --git a/drivers/greybus/svc.c b/drivers/greybus/svc.c
index 16cced80867a..0d7e749174a4 100644
--- a/drivers/greybus/svc.c
+++ b/drivers/greybus/svc.c
@@ -1318,7 +1318,7 @@ struct gb_svc *gb_svc_create(struct gb_host_device *hd)
if (!svc)
return NULL;
- svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
+ svc->wq = alloc_ordered_workqueue("%s:svc", 0, dev_name(&hd->dev));
if (!svc->wq) {
kfree(svc);
return NULL;
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 1c65a6dfb9fa..67f95a29aeca 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1372,7 +1372,7 @@ static int vmbus_bus_init(void)
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
hv_synic_init, hv_synic_cleanup);
if (ret < 0)
- goto err_cpuhp;
+ goto err_alloc;
hyperv_cpuhp_online = ret;
ret = vmbus_connect();
@@ -1392,9 +1392,8 @@ static int vmbus_bus_init(void)
err_connect:
cpuhp_remove_state(hyperv_cpuhp_online);
-err_cpuhp:
- hv_synic_free();
err_alloc:
+ hv_synic_free();
if (vmbus_irq == -1) {
hv_remove_vmbus_handler();
} else {
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 0afb89c4629d..7f1bef59046f 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -221,7 +221,7 @@ static struct i2c_driver ad7414_driver = {
.name = "ad7414",
.of_match_table = of_match_ptr(ad7414_of_match),
},
- .probe_new = ad7414_probe,
+ .probe = ad7414_probe,
.id_table = ad7414_id,
};
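The long run of hwmon changes that follows is one mechanical rename: the I2C core gave .probe the single-argument prototype that .probe_new had, so drivers simply move the callback to .probe. The shape, with illustrative names:

static int example_probe(struct i2c_client *client)
{
	return 0;
}

static struct i2c_driver example_driver = {
	.driver	= { .name = "example" },
	.probe	= example_probe,	/* was: .probe_new = example_probe */
};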
diff --git a/drivers/hwmon/ad7418.c b/drivers/hwmon/ad7418.c
index 22bdb7e5b9e0..ffe81e445010 100644
--- a/drivers/hwmon/ad7418.c
+++ b/drivers/hwmon/ad7418.c
@@ -306,7 +306,7 @@ static struct i2c_driver ad7418_driver = {
.name = "ad7418",
.of_match_table = ad7418_dt_ids,
},
- .probe_new = ad7418_probe,
+ .probe = ad7418_probe,
.id_table = ad7418_id,
};
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index 97b330b6c165..46e3c8c50765 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -521,7 +521,7 @@ static struct i2c_driver adc128_driver = {
.name = "adc128d818",
.of_match_table = of_match_ptr(adc128_of_match),
},
- .probe_new = adc128_probe,
+ .probe = adc128_probe,
.remove = adc128_remove,
.id_table = adc128_id,
.detect = adc128_detect,
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 2dc45e958730..7c15398ebb37 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -488,7 +488,7 @@ static struct i2c_driver adm1021_driver = {
.driver = {
.name = "adm1021",
},
- .probe_new = adm1021_probe,
+ .probe = adm1021_probe,
.id_table = adm1021_id,
.detect = adm1021_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index 2984c4f98496..389382d54752 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -559,7 +559,7 @@ static struct i2c_driver adm1025_driver = {
.driver = {
.name = "adm1025",
},
- .probe_new = adm1025_probe,
+ .probe = adm1025_probe,
.id_table = adm1025_id,
.detect = adm1025_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 1f084f708743..581d8edf70ea 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -1859,7 +1859,7 @@ static struct i2c_driver adm1026_driver = {
.driver = {
.name = "adm1026",
},
- .probe_new = adm1026_probe,
+ .probe = adm1026_probe,
.id_table = adm1026_id,
.detect = adm1026_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index eaf6e5e04aac..9a465f3f71c8 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -389,7 +389,7 @@ static struct i2c_driver adm1029_driver = {
.driver = {
.name = "adm1029",
},
- .probe_new = adm1029_probe,
+ .probe = adm1029_probe,
.id_table = adm1029_id,
.detect = adm1029_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index b42797bcb5b4..88c7e0d62d08 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -1068,7 +1068,7 @@ static struct i2c_driver adm1031_driver = {
.driver = {
.name = "adm1031",
},
- .probe_new = adm1031_probe,
+ .probe = adm1031_probe,
.id_table = adm1031_id,
.detect = adm1031_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/adm1177.c b/drivers/hwmon/adm1177.c
index bfe070a1b501..60a893f27159 100644
--- a/drivers/hwmon/adm1177.c
+++ b/drivers/hwmon/adm1177.c
@@ -255,7 +255,7 @@ static struct i2c_driver adm1177_driver = {
.name = "adm1177",
.of_match_table = adm1177_dt_ids,
},
- .probe_new = adm1177_probe,
+ .probe = adm1177_probe,
.id_table = adm1177_id,
};
module_i2c_driver(adm1177_driver);
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 9eb973a38e4b..6dfbeb6acf00 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -819,7 +819,7 @@ static struct i2c_driver adm9240_driver = {
.driver = {
.name = "adm9240",
},
- .probe_new = adm9240_probe,
+ .probe = adm9240_probe,
.id_table = adm9240_id,
.detect = adm9240_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index 7246198f0901..1932613ec095 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -208,7 +208,7 @@ static struct i2c_driver ads7828_driver = {
},
.id_table = ads7828_device_ids,
- .probe_new = ads7828_probe,
+ .probe = ads7828_probe,
};
module_i2c_driver(ads7828_driver);
diff --git a/drivers/hwmon/adt7410.c b/drivers/hwmon/adt7410.c
index 0cebf6777239..952506779336 100644
--- a/drivers/hwmon/adt7410.c
+++ b/drivers/hwmon/adt7410.c
@@ -100,7 +100,7 @@ static struct i2c_driver adt7410_driver = {
.name = "adt7410",
.pm = pm_sleep_ptr(&adt7x10_dev_pm_ops),
},
- .probe_new = adt7410_i2c_probe,
+ .probe = adt7410_i2c_probe,
.id_table = adt7410_ids,
.address_list = I2C_ADDRS(0x48, 0x49, 0x4a, 0x4b),
};
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 6ba84921614f..45fe4e8aae4e 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -706,7 +706,7 @@ static struct i2c_driver adt7411_driver = {
.driver = {
.name = "adt7411",
},
- .probe_new = adt7411_probe,
+ .probe = adt7411_probe,
.id_table = adt7411_id,
.detect = adt7411_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 9c0235849d4b..429566c4245d 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -1819,7 +1819,7 @@ static struct i2c_driver adt7462_driver = {
.driver = {
.name = "adt7462",
},
- .probe_new = adt7462_probe,
+ .probe = adt7462_probe,
.id_table = adt7462_id,
.detect = adt7462_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 64f801b859ff..c4b3a4a18670 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -1314,7 +1314,7 @@ static struct i2c_driver adt7470_driver = {
.driver = {
.name = "adt7470",
},
- .probe_new = adt7470_probe,
+ .probe = adt7470_probe,
.remove = adt7470_remove,
.id_table = adt7470_id,
.detect = adt7470_detect,
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 6a6ebcc896b1..c0ce88324ea6 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -1468,7 +1468,7 @@ static int load_config3(const struct i2c_client *client, const char *propname)
u8 config3;
int ret;
- ret = of_property_read_string(client->dev.of_node, propname, &function);
+ ret = device_property_read_string(&client->dev, propname, &function);
if (!ret) {
ret = adt7475_read(REG_CONFIG3);
if (ret < 0)
@@ -1494,7 +1494,7 @@ static int load_config4(const struct i2c_client *client, const char *propname)
u8 config4;
int ret;
- ret = of_property_read_string(client->dev.of_node, propname, &function);
+ ret = device_property_read_string(&client->dev, propname, &function);
if (!ret) {
ret = adt7475_read(REG_CONFIG4);
if (ret < 0)
@@ -1556,8 +1556,8 @@ static int set_property_bit(const struct i2c_client *client, char *property,
u8 *config, u8 bit_index)
{
u32 prop_value = 0;
- int ret = of_property_read_u32(client->dev.of_node, property,
- &prop_value);
+ int ret = device_property_read_u32(&client->dev, property,
+ &prop_value);
if (!ret) {
if (prop_value)
@@ -1821,7 +1821,7 @@ static struct i2c_driver adt7475_driver = {
.name = "adt7475",
.of_match_table = of_match_ptr(adt7475_of_match),
},
- .probe_new = adt7475_probe,
+ .probe = adt7475_probe,
.id_table = adt7475_id,
.detect = adt7475_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
index b8fe3f7248ba..4ecc02315f8f 100644
--- a/drivers/hwmon/aht10.c
+++ b/drivers/hwmon/aht10.c
@@ -334,7 +334,7 @@ static struct i2c_driver aht10_driver = {
.driver = {
.name = "aht10",
},
- .probe_new = aht10_probe,
+ .probe = aht10_probe,
.id_table = aht10_id,
};
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 3bfd12ff4b3c..2a7a4b6b0094 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -939,7 +939,7 @@ static struct i2c_driver amc6821_driver = {
.driver = {
.name = "amc6821",
},
- .probe_new = amc6821_probe,
+ .probe = amc6821_probe,
.id_table = amc6821_id,
.detect = amc6821_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index ce4da836765c..974521e9b6b4 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -223,7 +223,7 @@ static struct i2c_driver asb100_driver = {
.driver = {
.name = "asb100",
},
- .probe_new = asb100_probe,
+ .probe = asb100_probe,
.remove = asb100_remove,
.id_table = asb100_id,
.detect = asb100_detect,
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 54595454537b..87e186700849 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -1191,7 +1191,7 @@ static struct i2c_driver asc7621_driver = {
.driver = {
.name = "asc7621",
},
- .probe_new = asc7621_probe,
+ .probe = asc7621_probe,
.remove = asc7621_remove,
.id_table = asc7621_id,
.detect = asc7621_detect,
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index e5be0cf472fc..f52a539eb33e 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -101,6 +101,8 @@ enum ec_sensors {
ec_sensor_temp_chipset,
/* CPU temperature [℃] */
ec_sensor_temp_cpu,
+ /* CPU package temperature [℃] */
+ ec_sensor_temp_cpu_package,
/* motherboard temperature [℃] */
ec_sensor_temp_mb,
/* "T_Sensor" temperature sensor reading [℃] */
@@ -139,6 +141,7 @@ enum ec_sensors {
#define SENSOR_TEMP_CHIPSET BIT(ec_sensor_temp_chipset)
#define SENSOR_TEMP_CPU BIT(ec_sensor_temp_cpu)
+#define SENSOR_TEMP_CPU_PACKAGE BIT(ec_sensor_temp_cpu_package)
#define SENSOR_TEMP_MB BIT(ec_sensor_temp_mb)
#define SENSOR_TEMP_T_SENSOR BIT(ec_sensor_temp_t_sensor)
#define SENSOR_TEMP_VRM BIT(ec_sensor_temp_vrm)
@@ -161,6 +164,7 @@ enum board_family {
family_unknown,
family_amd_400_series,
family_amd_500_series,
+ family_amd_600_series,
family_intel_300_series,
family_intel_600_series
};
@@ -233,6 +237,19 @@ static const struct ec_sensor_info sensors_family_amd_500[] = {
EC_SENSOR("Extra_3", hwmon_temp, 1, 0x01, 0x0c),
};
+static const struct ec_sensor_info sensors_family_amd_600[] = {
+ [ec_sensor_temp_cpu] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x30),
+ [ec_sensor_temp_cpu_package] = EC_SENSOR("CPU Package", hwmon_temp, 1, 0x00, 0x31),
+ [ec_sensor_temp_mb] =
+ EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x32),
+ [ec_sensor_temp_vrm] =
+ EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x33),
+ [ec_sensor_temp_water_in] =
+ EC_SENSOR("Water_In", hwmon_temp, 1, 0x01, 0x00),
+ [ec_sensor_temp_water_out] =
+ EC_SENSOR("Water_Out", hwmon_temp, 1, 0x01, 0x01),
+};
+
static const struct ec_sensor_info sensors_family_intel_300[] = {
[ec_sensor_temp_chipset] =
EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
@@ -319,6 +336,14 @@ static const struct ec_board_info board_info_pro_ws_x570_ace = {
.family = family_amd_500_series,
};
+static const struct ec_board_info board_info_crosshair_x670e_hero = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+ SENSOR_SET_TEMP_WATER,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .family = family_amd_600_series,
+};
+
static const struct ec_board_info board_info_crosshair_viii_dark_hero = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR |
@@ -463,6 +488,8 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_crosshair_viii_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO (WI-FI)",
&board_info_crosshair_viii_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR X670E HERO",
+ &board_info_crosshair_x670e_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO",
&board_info_maximus_xi_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO (WI-FI)",
@@ -946,6 +973,9 @@ static int asus_ec_probe(struct platform_device *pdev)
case family_amd_500_series:
ec_data->sensors_info = sensors_family_amd_500;
break;
+ case family_amd_600_series:
+ ec_data->sensors_info = sensors_family_amd_600;
+ break;
case family_intel_300_series:
ec_data->sensors_info = sensors_family_intel_300;
break;
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index 118297ea1dcf..d1de020abec6 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -288,7 +288,7 @@ static struct i2c_driver atxp1_driver = {
.driver = {
.name = "atxp1",
},
- .probe_new = atxp1_probe,
+ .probe = atxp1_probe,
.id_table = atxp1_id,
};
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index 66c48f70fae7..cdbf3dff9172 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -2528,7 +2528,7 @@ static struct i2c_driver dme1737_i2c_driver = {
.driver = {
.name = "dme1737",
},
- .probe_new = dme1737_i2c_probe,
+ .probe = dme1737_i2c_probe,
.remove = dme1737_i2c_remove,
.id_table = dme1737_id,
.detect = dme1737_i2c_detect,
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index e803d6393b9e..21b635046521 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -384,7 +384,7 @@ static struct i2c_driver ds1621_driver = {
.driver = {
.name = "ds1621",
},
- .probe_new = ds1621_probe,
+ .probe = ds1621_probe,
.id_table = ds1621_id,
};
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index 82d7c3d58f49..2b09536630cb 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -245,7 +245,7 @@ static struct i2c_driver ds620_driver = {
.driver = {
.name = "ds620",
},
- .probe_new = ds620_probe,
+ .probe = ds620_probe,
.id_table = ds620_id,
};
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 61d59189a6d1..bb7c859e799d 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -454,7 +454,7 @@ static struct i2c_driver sensor_emc1403 = {
.name = "emc1403",
},
.detect = emc1403_detect,
- .probe_new = emc1403_probe,
+ .probe = emc1403_probe,
.id_table = emc1403_idtable,
.address_list = emc1403_address_list,
};
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 361cf9292456..b59472bbe5bf 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -653,7 +653,7 @@ static struct i2c_driver emc2103_driver = {
.driver = {
.name = "emc2103",
},
- .probe_new = emc2103_probe,
+ .probe = emc2103_probe,
.id_table = emc2103_ids,
.detect = emc2103_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/emc2305.c b/drivers/hwmon/emc2305.c
index 723f57518c9a..29f0e4945f19 100644
--- a/drivers/hwmon/emc2305.c
+++ b/drivers/hwmon/emc2305.c
@@ -615,7 +615,7 @@ static struct i2c_driver emc2305_driver = {
.driver = {
.name = "emc2305",
},
- .probe_new = emc2305_probe,
+ .probe = emc2305_probe,
.remove = emc2305_remove,
.id_table = emc2305_ids,
.address_list = emc2305_normal_i2c,
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index bcd93f0fe982..9a4f868bf1e6 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -474,7 +474,7 @@ static struct i2c_driver emc6w201_driver = {
.driver = {
.name = "emc6w201",
},
- .probe_new = emc6w201_probe,
+ .probe = emc6w201_probe,
.id_table = emc6w201_id,
.detect = emc6w201_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 64fbb8cf687c..8c572bb64f5d 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -129,7 +129,7 @@ static struct i2c_driver f75375_driver = {
.driver = {
.name = "f75375",
},
- .probe_new = f75375_probe,
+ .probe = f75375_probe,
.remove = f75375_remove,
.id_table = f75375_id,
.detect = f75375_detect,
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index e1f426e86f36..b30512a705a7 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -241,7 +241,7 @@ static struct i2c_driver fschmd_driver = {
.driver = {
.name = "fschmd",
},
- .probe_new = fschmd_probe,
+ .probe = fschmd_probe,
.remove = fschmd_remove,
.id_table = fschmd_id,
.detect = fschmd_detect,
diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
index f5a4d91a7e90..b74a2665e733 100644
--- a/drivers/hwmon/ftsteutates.c
+++ b/drivers/hwmon/ftsteutates.c
@@ -678,7 +678,7 @@ static struct i2c_driver fts_driver = {
.name = "ftsteutates",
},
.id_table = fts_id,
- .probe_new = fts_probe,
+ .probe = fts_probe,
.detect = fts_detect,
.address_list = normal_i2c,
};
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index 36717b524dbd..b5edee00267b 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -206,7 +206,7 @@ static struct i2c_driver g760a_driver = {
.driver = {
.name = "g760a",
},
- .probe_new = g760a_probe,
+ .probe = g760a_probe,
.id_table = g760a_id,
};
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index e2c3c34f06e8..1b6ff4712138 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -1084,7 +1084,7 @@ static struct i2c_driver g762_driver = {
.name = DRVNAME,
.of_match_table = of_match_ptr(g762_dt_match),
},
- .probe_new = g762_probe,
+ .probe = g762_probe,
.id_table = g762_id,
};
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 95286c40f55a..03db6158b13a 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -652,7 +652,7 @@ static struct i2c_driver gl518_driver = {
.driver = {
.name = "gl518sm",
},
- .probe_new = gl518_probe,
+ .probe = gl518_probe,
.id_table = gl518_id,
.detect = gl518_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 394da4ac977c..8bbc6a4f2928 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -895,7 +895,7 @@ static struct i2c_driver gl520_driver = {
.driver = {
.name = "gl520sm",
},
- .probe_new = gl520_probe,
+ .probe = gl520_probe,
.id_table = gl520_id,
.detect = gl520_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index 3a7582824f94..a9726b5370fb 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -249,7 +249,7 @@ static struct i2c_driver hih6130_driver = {
.name = "hih6130",
.of_match_table = of_match_ptr(hih6130_of_match),
},
- .probe_new = hih6130_probe,
+ .probe = hih6130_probe,
.id_table = hih6130_id,
};
diff --git a/drivers/hwmon/ina209.c b/drivers/hwmon/ina209.c
index 9b58655d2de4..c558143e5285 100644
--- a/drivers/hwmon/ina209.c
+++ b/drivers/hwmon/ina209.c
@@ -594,7 +594,7 @@ static struct i2c_driver ina209_driver = {
.name = "ina209",
.of_match_table = of_match_ptr(ina209_of_match),
},
- .probe_new = ina209_probe,
+ .probe = ina209_probe,
.remove = ina209_remove,
.id_table = ina209_id,
};
diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c
index 8af07bc0c50e..f519c22d3907 100644
--- a/drivers/hwmon/ina238.c
+++ b/drivers/hwmon/ina238.c
@@ -633,7 +633,7 @@ static struct i2c_driver ina238_driver = {
.name = "ina238",
.of_match_table = of_match_ptr(ina238_of_match),
},
- .probe_new = ina238_probe,
+ .probe = ina238_probe,
.id_table = ina238_id,
};
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index fd50d9785ccb..cfd7efef5cdf 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -721,7 +721,7 @@ static struct i2c_driver ina2xx_driver = {
.name = "ina2xx",
.of_match_table = of_match_ptr(ina2xx_of_match),
},
- .probe_new = ina2xx_probe,
+ .probe = ina2xx_probe,
.id_table = ina2xx_id,
};
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 2735e3782ffb..5ab944056ec0 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -1010,7 +1010,7 @@ static const struct i2c_device_id ina3221_ids[] = {
MODULE_DEVICE_TABLE(i2c, ina3221_ids);
static struct i2c_driver ina3221_i2c_driver = {
- .probe_new = ina3221_probe,
+ .probe = ina3221_probe,
.remove = ina3221_remove,
.driver = {
.name = INA3221_DRIVER_NAME,
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index eb38f54ebeb6..4c3641d28a6a 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -317,6 +317,9 @@ struct it87_devices {
* chips to avoid the problem.
*/
#define FEAT_CONF_NOEXIT BIT(19) /* Chip should not exit conf mode */
+#define FEAT_FOUR_FANS BIT(20) /* Supports four fans */
+#define FEAT_FOUR_PWM BIT(21) /* Supports four fan controls */
+#define FEAT_FOUR_TEMP BIT(22) /* Supports four temp sensors */
static const struct it87_devices it87_devices[] = {
[it87] = {
@@ -375,7 +378,8 @@ static const struct it87_devices it87_devices[] = {
.model = "IT8732F",
.features = FEAT_NEWER_AUTOPWM | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI
- | FEAT_10_9MV_ADC | FEAT_IN7_INTERNAL,
+ | FEAT_10_9MV_ADC | FEAT_IN7_INTERNAL | FEAT_FOUR_FANS
+ | FEAT_FOUR_PWM,
.peci_mask = 0x07,
.old_peci_mask = 0x02, /* Actually reports PCH */
},
@@ -472,7 +476,7 @@ static const struct it87_devices it87_devices[] = {
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_FIVE_FANS
| FEAT_FIVE_PWM | FEAT_IN7_INTERNAL | FEAT_PWM_FREQ2
- | FEAT_AVCC3 | FEAT_VIN3_5V,
+ | FEAT_AVCC3 | FEAT_VIN3_5V | FEAT_FOUR_TEMP,
.peci_mask = 0x07,
.smbus_bitmap = BIT(1) | BIT(2),
},
@@ -508,16 +512,23 @@ static const struct it87_devices it87_devices[] = {
(((data)->features & FEAT_TEMP_OLD_PECI) && \
((data)->old_peci_mask & BIT(nr)))
#define has_fan16_config(data) ((data)->features & FEAT_FAN16_CONFIG)
+#define has_four_fans(data) ((data)->features & (FEAT_FOUR_FANS | \
+ FEAT_FIVE_FANS | \
+ FEAT_SIX_FANS))
#define has_five_fans(data) ((data)->features & (FEAT_FIVE_FANS | \
FEAT_SIX_FANS))
+#define has_six_fans(data) ((data)->features & FEAT_SIX_FANS)
#define has_vid(data) ((data)->features & FEAT_VID)
#define has_in7_internal(data) ((data)->features & FEAT_IN7_INTERNAL)
-#define has_six_fans(data) ((data)->features & FEAT_SIX_FANS)
#define has_avcc3(data) ((data)->features & FEAT_AVCC3)
-#define has_five_pwm(data) ((data)->features & (FEAT_FIVE_PWM \
- | FEAT_SIX_PWM))
+#define has_four_pwm(data) ((data)->features & (FEAT_FOUR_PWM | \
+ FEAT_FIVE_PWM | \
+ FEAT_SIX_PWM))
+#define has_five_pwm(data) ((data)->features & (FEAT_FIVE_PWM | \
+ FEAT_SIX_PWM))
#define has_six_pwm(data) ((data)->features & FEAT_SIX_PWM)
#define has_pwm_freq2(data) ((data)->features & FEAT_PWM_FREQ2)
+#define has_four_temp(data) ((data)->features & FEAT_FOUR_TEMP)
#define has_six_temp(data) ((data)->features & FEAT_SIX_TEMP)
#define has_vin3_5v(data) ((data)->features & FEAT_VIN3_5V)
#define has_conf_noexit(data) ((data)->features & FEAT_CONF_NOEXIT)
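The fan/pwm capability macros are deliberately cumulative, so a chip flagged FEAT_FIVE_FANS also satisfies has_four_fans(), and the probe code can peel capabilities off in descending order, as the skip_pwm cascade in the next hunk does. An illustrative check:

static bool example_supports_fan4(const struct it87_data *data)
{
	/* true for FEAT_FOUR_FANS, FEAT_FIVE_FANS or FEAT_SIX_FANS */
	return has_four_fans(data);
}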
@@ -2730,8 +2741,10 @@ static int __init it87_find(int sioaddr, unsigned short *address,
else
sio_data->skip_in |= BIT(9);
- if (!has_five_pwm(config))
+ if (!has_four_pwm(config))
sio_data->skip_pwm |= BIT(3) | BIT(4) | BIT(5);
+ else if (!has_five_pwm(config))
+ sio_data->skip_pwm |= BIT(4) | BIT(5);
else if (!has_six_pwm(config))
sio_data->skip_pwm |= BIT(5);
@@ -2924,6 +2937,34 @@ static int __init it87_find(int sioaddr, unsigned short *address,
sio_data->beep_pin = superio_inb(sioaddr,
IT87_SIO_BEEP_PIN_REG) & 0x3f;
+ } else if (sio_data->type == it8732) {
+ int reg;
+
+ superio_select(sioaddr, GPIO);
+
+ /* Check for pwm2, fan2 */
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO5_REG);
+ if (reg & BIT(1))
+ sio_data->skip_pwm |= BIT(1);
+ if (reg & BIT(2))
+ sio_data->skip_fan |= BIT(1);
+
+ /* Check for pwm3, fan3, fan4 */
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO3_REG);
+ if (reg & BIT(6))
+ sio_data->skip_pwm |= BIT(2);
+ if (reg & BIT(7))
+ sio_data->skip_fan |= BIT(2);
+ if (reg & BIT(5))
+ sio_data->skip_fan |= BIT(3);
+
+ /* Check if AVCC is on VIN3 */
+ reg = superio_inb(sioaddr, IT87_SIO_PINX2_REG);
+ if (reg & BIT(0))
+ sio_data->internal |= BIT(0);
+
+ sio_data->beep_pin = superio_inb(sioaddr,
+ IT87_SIO_BEEP_PIN_REG) & 0x3f;
} else {
int reg;
bool uart6;
@@ -3169,16 +3210,14 @@ static void it87_init_device(struct platform_device *pdev)
it87_check_tachometers_16bit_mode(pdev);
/* Check for additional fans */
- if (has_five_fans(data)) {
- tmp = it87_read_value(data, IT87_REG_FAN_16BIT);
-
- if (tmp & BIT(4))
- data->has_fan |= BIT(3); /* fan4 enabled */
- if (tmp & BIT(5))
- data->has_fan |= BIT(4); /* fan5 enabled */
- if (has_six_fans(data) && (tmp & BIT(2)))
- data->has_fan |= BIT(5); /* fan6 enabled */
- }
+ tmp = it87_read_value(data, IT87_REG_FAN_16BIT);
+
+ if (has_four_fans(data) && (tmp & BIT(4)))
+ data->has_fan |= BIT(3); /* fan4 enabled */
+ if (has_five_fans(data) && (tmp & BIT(5)))
+ data->has_fan |= BIT(4); /* fan5 enabled */
+ if (has_six_fans(data) && (tmp & BIT(2)))
+ data->has_fan |= BIT(5); /* fan6 enabled */
/* Fan input pins may be used for alternative functions */
data->has_fan &= ~sio_data->skip_fan;
@@ -3356,7 +3395,9 @@ static int it87_probe(struct platform_device *pdev)
data->need_in7_reroute = sio_data->need_in7_reroute;
data->has_in = 0x3ff & ~sio_data->skip_in;
- if (has_six_temp(data)) {
+ if (has_four_temp(data)) {
+ data->has_temp |= BIT(3);
+ } else if (has_six_temp(data)) {
u8 reg = it87_read_value(data, IT87_REG_TEMP456_ENABLE);
/* Check for additional temperature sensors */
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 4c60dc520b12..f958e830b23c 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -629,7 +629,7 @@ static struct i2c_driver jc42_driver = {
.pm = JC42_DEV_PM_OPS,
.of_match_table = of_match_ptr(jc42_of_ids),
},
- .probe_new = jc42_probe,
+ .probe = jc42_probe,
.remove = jc42_remove,
.id_table = jc42_id,
.detect = jc42_detect,
diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
index ef5a49cd9149..df69c380cde7 100644
--- a/drivers/hwmon/lineage-pem.c
+++ b/drivers/hwmon/lineage-pem.c
@@ -511,7 +511,7 @@ static struct i2c_driver pem_driver = {
.driver = {
.name = "lineage_pem",
},
- .probe_new = pem_probe,
+ .probe = pem_probe,
.id_table = pem_id,
};
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 9ab2cab4c710..6972454eb4e0 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -1164,7 +1164,7 @@ static struct i2c_driver lm63_driver = {
.name = "lm63",
.of_match_table = of_match_ptr(lm63_of_match),
},
- .probe_new = lm63_probe,
+ .probe = lm63_probe,
.id_table = lm63_id,
.detect = lm63_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index b6433ae2d75c..637d35c5ae23 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -277,7 +277,7 @@ static struct i2c_driver lm73_driver = {
.name = "lm73",
.of_match_table = lm73_of_match,
},
- .probe_new = lm73_probe,
+ .probe = lm73_probe,
.id_table = lm73_ids,
.detect = lm73_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index dbb99ea4a0ec..182aeb388523 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -945,7 +945,7 @@ static struct i2c_driver lm75_driver = {
.of_match_table = of_match_ptr(lm75_of_match),
.pm = LM75_DEV_PM_OPS,
},
- .probe_new = lm75_probe,
+ .probe = lm75_probe,
.id_table = lm75_ids,
.detect = lm75_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index 645cb2191abe..8b9862519178 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -348,7 +348,7 @@ static struct i2c_driver lm77_driver = {
.driver = {
.name = "lm77",
},
- .probe_new = lm77_probe,
+ .probe = lm77_probe,
.id_table = lm77_id,
.detect = lm77_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 694e171cab7f..b739c354311b 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -662,7 +662,7 @@ static struct i2c_driver lm78_driver = {
.driver = {
.name = "lm78",
},
- .probe_new = lm78_i2c_probe,
+ .probe = lm78_i2c_probe,
.id_table = lm78_i2c_id,
.detect = lm78_i2c_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 35db0b97f912..63c7831bd3e1 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -633,7 +633,7 @@ static struct i2c_driver lm80_driver = {
.driver = {
.name = "lm80",
},
- .probe_new = lm80_probe,
+ .probe = lm80_probe,
.id_table = lm80_id,
.detect = lm80_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index bcaf31ec176e..5befedca6abb 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -454,7 +454,7 @@ static struct i2c_driver lm83_driver = {
.driver = {
.name = "lm83",
},
- .probe_new = lm83_probe,
+ .probe = lm83_probe,
.id_table = lm83_id,
.detect = lm83_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 8d33c2484755..8540178f5b74 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -1698,7 +1698,7 @@ static struct i2c_driver lm85_driver = {
.name = "lm85",
.of_match_table = of_match_ptr(lm85_of_match),
},
- .probe_new = lm85_probe,
+ .probe = lm85_probe,
.id_table = lm85_id,
.detect = lm85_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 818fb6195245..2195a735d28e 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -994,7 +994,7 @@ static struct i2c_driver lm87_driver = {
.name = "lm87",
.of_match_table = lm87_of_match,
},
- .probe_new = lm87_probe,
+ .probe = lm87_probe,
.id_table = lm87_id,
.detect = lm87_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 6498d5acf705..90101c236f35 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -2972,7 +2972,7 @@ static struct i2c_driver lm90_driver = {
.of_match_table = of_match_ptr(lm90_of_match),
.pm = pm_sleep_ptr(&lm90_pm_ops),
},
- .probe_new = lm90_probe,
+ .probe = lm90_probe,
.alert = lm90_alert,
.id_table = lm90_id,
.detect = lm90_detect,
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 2ff3044a677d..46579a3e1715 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -330,7 +330,7 @@ static struct i2c_driver lm92_driver = {
.driver = {
.name = "lm92",
},
- .probe_new = lm92_probe,
+ .probe = lm92_probe,
.id_table = lm92_id,
.detect = lm92_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 4cf50d5f4f59..75bca805720e 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -2635,7 +2635,7 @@ static struct i2c_driver lm93_driver = {
.driver = {
.name = "lm93",
},
- .probe_new = lm93_probe,
+ .probe = lm93_probe,
.id_table = lm93_id,
.detect = lm93_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c
index b4a9d0c223c4..67b9d7636ee4 100644
--- a/drivers/hwmon/lm95234.c
+++ b/drivers/hwmon/lm95234.c
@@ -720,7 +720,7 @@ static struct i2c_driver lm95234_driver = {
.driver = {
.name = DRVNAME,
},
- .probe_new = lm95234_probe,
+ .probe = lm95234_probe,
.id_table = lm95234_id,
.detect = lm95234_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 647a90570bc6..475551f5024b 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -468,7 +468,7 @@ static struct i2c_driver lm95241_driver = {
.driver = {
.name = DEVNAME,
},
- .probe_new = lm95241_probe,
+ .probe = lm95241_probe,
.id_table = lm95241_id,
.detect = lm95241_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index 4236d1e0544d..7f13b6b712fb 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -597,7 +597,7 @@ static struct i2c_driver lm95245_driver = {
.name = "lm95245",
.of_match_table = of_match_ptr(lm95245_of_match),
},
- .probe_new = lm95245_probe,
+ .probe = lm95245_probe,
.id_table = lm95245_id,
.detect = lm95245_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/ltc2945.c b/drivers/hwmon/ltc2945.c
index 3494f7261ebf..45b87a863cae 100644
--- a/drivers/hwmon/ltc2945.c
+++ b/drivers/hwmon/ltc2945.c
@@ -519,7 +519,7 @@ static struct i2c_driver ltc2945_driver = {
.name = "ltc2945",
.of_match_table = of_match_ptr(ltc2945_of_match),
},
- .probe_new = ltc2945_probe,
+ .probe = ltc2945_probe,
.id_table = ltc2945_id,
};
diff --git a/drivers/hwmon/ltc2947-i2c.c b/drivers/hwmon/ltc2947-i2c.c
index 96852bc8a964..33f574bf2ce7 100644
--- a/drivers/hwmon/ltc2947-i2c.c
+++ b/drivers/hwmon/ltc2947-i2c.c
@@ -38,7 +38,7 @@ static struct i2c_driver ltc2947_driver = {
.of_match_table = ltc2947_of_match,
.pm = pm_sleep_ptr(&ltc2947_pm_ops),
},
- .probe_new = ltc2947_probe,
+ .probe = ltc2947_probe,
.id_table = ltc2947_id,
};
module_i2c_driver(ltc2947_driver);
diff --git a/drivers/hwmon/ltc2990.c b/drivers/hwmon/ltc2990.c
index 689f788b8563..1ad362c0fd2c 100644
--- a/drivers/hwmon/ltc2990.c
+++ b/drivers/hwmon/ltc2990.c
@@ -268,7 +268,7 @@ static struct i2c_driver ltc2990_i2c_driver = {
.driver = {
.name = "ltc2990",
},
- .probe_new = ltc2990_i2c_probe,
+ .probe = ltc2990_i2c_probe,
.id_table = ltc2990_i2c_id,
};
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index 429a7f5837f0..589bcd07ce7f 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -928,7 +928,7 @@ static struct i2c_driver ltc2992_i2c_driver = {
.name = "ltc2992",
.of_match_table = ltc2992_of_match,
},
- .probe_new = ltc2992_i2c_probe,
+ .probe = ltc2992_i2c_probe,
.id_table = ltc2992_i2c_id,
};
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
index e3ac004c1ed1..f42ac3c9475e 100644
--- a/drivers/hwmon/ltc4151.c
+++ b/drivers/hwmon/ltc4151.c
@@ -205,7 +205,7 @@ static struct i2c_driver ltc4151_driver = {
.name = "ltc4151",
.of_match_table = of_match_ptr(ltc4151_match),
},
- .probe_new = ltc4151_probe,
+ .probe = ltc4151_probe,
.id_table = ltc4151_id,
};
diff --git a/drivers/hwmon/ltc4215.c b/drivers/hwmon/ltc4215.c
index fa43d26ddd4f..66fd28f713ab 100644
--- a/drivers/hwmon/ltc4215.c
+++ b/drivers/hwmon/ltc4215.c
@@ -255,7 +255,7 @@ static struct i2c_driver ltc4215_driver = {
.driver = {
.name = "ltc4215",
},
- .probe_new = ltc4215_probe,
+ .probe = ltc4215_probe,
.id_table = ltc4215_id,
};
diff --git a/drivers/hwmon/ltc4222.c b/drivers/hwmon/ltc4222.c
index d2027ca5c925..9098ef521739 100644
--- a/drivers/hwmon/ltc4222.c
+++ b/drivers/hwmon/ltc4222.c
@@ -210,7 +210,7 @@ static struct i2c_driver ltc4222_driver = {
.driver = {
.name = "ltc4222",
},
- .probe_new = ltc4222_probe,
+ .probe = ltc4222_probe,
.id_table = ltc4222_id,
};
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index fb9bc8dbe99b..b90184a3e0ec 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -479,7 +479,7 @@ static struct i2c_driver ltc4245_driver = {
.driver = {
.name = "ltc4245",
},
- .probe_new = ltc4245_probe,
+ .probe = ltc4245_probe,
.id_table = ltc4245_id,
};
diff --git a/drivers/hwmon/ltc4260.c b/drivers/hwmon/ltc4260.c
index 75e89cec381e..52f7a809b27b 100644
--- a/drivers/hwmon/ltc4260.c
+++ b/drivers/hwmon/ltc4260.c
@@ -173,7 +173,7 @@ static struct i2c_driver ltc4260_driver = {
.driver = {
.name = "ltc4260",
},
- .probe_new = ltc4260_probe,
+ .probe = ltc4260_probe,
.id_table = ltc4260_id,
};
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index b91cc4fe84e5..509e68176c7a 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -233,7 +233,7 @@ static struct i2c_driver ltc4261_driver = {
.driver = {
.name = "ltc4261",
},
- .probe_new = ltc4261_probe,
+ .probe = ltc4261_probe,
.id_table = ltc4261_id,
};
diff --git a/drivers/hwmon/max127.c b/drivers/hwmon/max127.c
index 49de1d2be294..ee5ead06d612 100644
--- a/drivers/hwmon/max127.c
+++ b/drivers/hwmon/max127.c
@@ -339,7 +339,7 @@ static struct i2c_driver max127_driver = {
.driver = {
.name = "max127",
},
- .probe_new = max127_probe,
+ .probe = max127_probe,
.id_table = max127_id,
};
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index daa5d8af1e69..aa38c45adc09 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -600,7 +600,7 @@ static struct i2c_driver max16065_driver = {
.driver = {
.name = "max16065",
},
- .probe_new = max16065_probe,
+ .probe = max16065_probe,
.id_table = max16065_id,
};
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 445c77197f69..500dc926a7a7 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -305,7 +305,7 @@ static struct i2c_driver max1619_driver = {
.name = "max1619",
.of_match_table = of_match_ptr(max1619_of_match),
},
- .probe_new = max1619_probe,
+ .probe = max1619_probe,
.id_table = max1619_id,
.detect = max1619_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index 9f748973d6a3..c4a02edefbee 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -435,7 +435,7 @@ static struct i2c_driver max1668_driver = {
.driver = {
.name = "max1668",
},
- .probe_new = max1668_probe,
+ .probe = max1668_probe,
.id_table = max1668_id,
.detect = max1668_detect,
.address_list = max1668_addr_list,
diff --git a/drivers/hwmon/max31730.c b/drivers/hwmon/max31730.c
index 924333d89ce4..b1300ca9db1f 100644
--- a/drivers/hwmon/max31730.c
+++ b/drivers/hwmon/max31730.c
@@ -427,7 +427,7 @@ static struct i2c_driver max31730_driver = {
.of_match_table = of_match_ptr(max31730_of_match),
.pm = pm_sleep_ptr(&max31730_pm_ops),
},
- .probe_new = max31730_probe,
+ .probe = max31730_probe,
.id_table = max31730_ids,
.detect = max31730_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/max31760.c b/drivers/hwmon/max31760.c
index 4489110f109c..79945eb466ae 100644
--- a/drivers/hwmon/max31760.c
+++ b/drivers/hwmon/max31760.c
@@ -584,7 +584,7 @@ static struct i2c_driver max31760_driver = {
.of_match_table = max31760_of_match,
.pm = pm_ptr(&max31760_pm_ops)
},
- .probe_new = max31760_probe,
+ .probe = max31760_probe,
.id_table = max31760_id
};
module_i2c_driver(max31760_driver);
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index 6caba6e8ee8f..0cd44c1e998a 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -544,7 +544,7 @@ MODULE_DEVICE_TABLE(i2c, max31790_id);
static struct i2c_driver max31790_driver = {
.class = I2C_CLASS_HWMON,
- .probe_new = max31790_probe,
+ .probe = max31790_probe,
.driver = {
.name = "max31790",
},
diff --git a/drivers/hwmon/max6620.c b/drivers/hwmon/max6620.c
index 0f277d945ea2..5d12fb9c9786 100644
--- a/drivers/hwmon/max6620.c
+++ b/drivers/hwmon/max6620.c
@@ -503,7 +503,7 @@ static struct i2c_driver max6620_driver = {
.driver = {
.name = "max6620",
},
- .probe_new = max6620_probe,
+ .probe = max6620_probe,
.id_table = max6620_id,
};
diff --git a/drivers/hwmon/max6621.c b/drivers/hwmon/max6621.c
index 0656eb1e7959..7f709fd1af89 100644
--- a/drivers/hwmon/max6621.c
+++ b/drivers/hwmon/max6621.c
@@ -554,7 +554,7 @@ static struct i2c_driver max6621_driver = {
.name = MAX6621_DRV_NAME,
.of_match_table = of_match_ptr(max6621_of_match),
},
- .probe_new = max6621_probe,
+ .probe = max6621_probe,
.id_table = max6621_id,
};
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 9b895402c80d..caf527154fca 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -624,7 +624,7 @@ static struct i2c_driver max6639_driver = {
.name = "max6639",
.pm = pm_sleep_ptr(&max6639_pm_ops),
},
- .probe_new = max6639_probe,
+ .probe = max6639_probe,
.id_table = max6639_id,
.detect = max6639_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 47ea34ff78f3..8b2e4d6101a2 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -301,7 +301,7 @@ static struct i2c_driver max6642_driver = {
.driver = {
.name = "max6642",
},
- .probe_new = max6642_probe,
+ .probe = max6642_probe,
.id_table = max6642_id,
.detect = max6642_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 19e2c762ed2d..cc8428a3045d 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -819,7 +819,7 @@ static struct i2c_driver max6650_driver = {
.name = "max6650",
.of_match_table = of_match_ptr(max6650_dt_match),
},
- .probe_new = max6650_probe,
+ .probe = max6650_probe,
.id_table = max6650_id,
};
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index 2895cea54193..3a67778f111c 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -786,7 +786,7 @@ static struct i2c_driver max6697_driver = {
.name = "max6697",
.of_match_table = of_match_ptr(max6697_of_match),
},
- .probe_new = max6697_probe,
+ .probe = max6697_probe,
.id_table = max6697_id,
};
diff --git a/drivers/hwmon/mc34vr500.c b/drivers/hwmon/mc34vr500.c
index 6a7a950a9332..0c8fd3fce83e 100644
--- a/drivers/hwmon/mc34vr500.c
+++ b/drivers/hwmon/mc34vr500.c
@@ -251,7 +251,7 @@ static struct i2c_driver mc34vr500_driver = {
.name = "mc34vr500",
.of_match_table = of_match_ptr(mc34vr500_of_match),
},
- .probe_new = mc34vr500_probe,
+ .probe = mc34vr500_probe,
.id_table = mc34vr500_id,
};
diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
index a5f7a294f33d..127e15ff3e76 100644
--- a/drivers/hwmon/mcp3021.c
+++ b/drivers/hwmon/mcp3021.c
@@ -198,7 +198,7 @@ static struct i2c_driver mcp3021_driver = {
.name = "mcp3021",
.of_match_table = of_match_ptr(of_mcp3021_match),
},
- .probe_new = mcp3021_probe,
+ .probe = mcp3021_probe,
.id_table = mcp3021_id,
};
diff --git a/drivers/hwmon/nct6775-i2c.c b/drivers/hwmon/nct6775-i2c.c
index e1bcd1146191..8acd83e1e009 100644
--- a/drivers/hwmon/nct6775-i2c.c
+++ b/drivers/hwmon/nct6775-i2c.c
@@ -183,7 +183,7 @@ static struct i2c_driver nct6775_i2c_driver = {
.name = "nct6775-i2c",
.of_match_table = of_match_ptr(nct6775_i2c_of_match),
},
- .probe_new = nct6775_i2c_probe,
+ .probe = nct6775_i2c_probe,
.id_table = nct6775_i2c_id,
};
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index a175f8283695..9339bfc02a3e 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -1223,7 +1223,7 @@ static struct i2c_driver nct7802_driver = {
.name = DRVNAME,
},
.detect = nct7802_detect,
- .probe_new = nct7802_probe,
+ .probe = nct7802_probe,
.id_table = nct7802_idtable,
.address_list = nct7802_address_list,
};
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 007bae4c7028..8f867d4570e1 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -1171,7 +1171,7 @@ static struct i2c_driver nct7904_driver = {
.driver = {
.name = "nct7904",
},
- .probe_new = nct7904_probe,
+ .probe = nct7904_probe,
.id_table = nct7904_id,
.detect = nct7904_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/occ/p8_i2c.c b/drivers/hwmon/occ/p8_i2c.c
index 9e1744fccb35..06095975f5c8 100644
--- a/drivers/hwmon/occ/p8_i2c.c
+++ b/drivers/hwmon/occ/p8_i2c.c
@@ -246,7 +246,7 @@ static struct i2c_driver p8_i2c_occ_driver = {
.name = "occ-hwmon",
.of_match_table = p8_i2c_occ_of_match,
},
- .probe_new = p8_i2c_occ_probe,
+ .probe = p8_i2c_occ_probe,
.remove = p8_i2c_occ_remove,
};
diff --git a/drivers/hwmon/oxp-sensors.c b/drivers/hwmon/oxp-sensors.c
index ae67207030e8..7828c1d36695 100644
--- a/drivers/hwmon/oxp-sensors.c
+++ b/drivers/hwmon/oxp-sensors.c
@@ -42,8 +42,10 @@ static bool unlock_global_acpi_lock(void)
enum oxp_board {
aok_zoe_a1 = 1,
+ aya_neo_2,
aya_neo_air,
aya_neo_air_pro,
+ aya_neo_geek,
oxp_mini_amd,
oxp_mini_amd_pro,
};
@@ -60,35 +62,49 @@ static const struct dmi_system_id dmi_table[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "AOKZOE"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "AOKZOE A1 AR07"),
},
- .driver_data = (void *) &(enum oxp_board) {aok_zoe_a1},
+ .driver_data = (void *)aok_zoe_a1,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AYANEO 2"),
+ },
+ .driver_data = (void *)aya_neo_2,
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR"),
},
- .driver_data = (void *) &(enum oxp_board) {aya_neo_air},
+ .driver_data = (void *)aya_neo_air,
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR Pro"),
},
- .driver_data = (void *) &(enum oxp_board) {aya_neo_air_pro},
+ .driver_data = (void *)aya_neo_air_pro,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GEEK"),
+ },
+ .driver_data = (void *)aya_neo_geek,
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONE XPLAYER"),
},
- .driver_data = (void *) &(enum oxp_board) {oxp_mini_amd},
+ .driver_data = (void *)oxp_mini_amd,
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER Mini Pro"),
},
- .driver_data = (void *) &(enum oxp_board) {oxp_mini_amd_pro},
+ .driver_data = (void *)oxp_mini_amd_pro,
},
{},
};
@@ -178,8 +194,10 @@ static int oxp_platform_read(struct device *dev, enum hwmon_sensor_types type,
if (ret)
return ret;
switch (board) {
+ case aya_neo_2:
case aya_neo_air:
case aya_neo_air_pro:
+ case aya_neo_geek:
case oxp_mini_amd:
*val = (*val * 255) / 100;
break;
@@ -217,8 +235,10 @@ static int oxp_platform_write(struct device *dev, enum hwmon_sensor_types type,
if (val < 0 || val > 255)
return -EINVAL;
switch (board) {
+ case aya_neo_2:
case aya_neo_air:
case aya_neo_air_pro:
+ case aya_neo_geek:
case oxp_mini_amd:
val = (val * 100) / 255;
break;
@@ -276,7 +296,7 @@ static int oxp_platform_probe(struct platform_device *pdev)
if (!dmi_entry || boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return -ENODEV;
- board = *((enum oxp_board *) dmi_entry->driver_data);
+ board = (enum oxp_board)dmi_entry->driver_data;
hwdev = devm_hwmon_device_register_with_info(dev, "oxpec", NULL,
&oxp_ec_chip_info, NULL);
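The oxp-sensors change above replaces pointers to compound literals with the enum value stored directly in the driver_data pointer, dropping an indirection and any question of the literal's lifetime. A sketch of the idiom, with the cast spelled out via uintptr_t (the driver casts directly, which works out the same here):

/* store: a small enum fits in the pointer itself */
.driver_data = (void *)(uintptr_t)aya_neo_air,

/* load: recover the enum from the matched DMI entry */
board = (enum oxp_board)(uintptr_t)dmi_entry->driver_data;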
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index 1dbe209ae13f..66c76b28c9e0 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -294,7 +294,7 @@ static struct i2c_driver pcf8591_driver = {
.driver = {
.name = "pcf8591",
},
- .probe_new = pcf8591_probe,
+ .probe = pcf8591_probe,
.remove = pcf8591_remove,
.id_table = pcf8591_id,
};
diff --git a/drivers/hwmon/pmbus/acbel-fsg032.c b/drivers/hwmon/pmbus/acbel-fsg032.c
index 28a25f30e2cf..0a0ef4ce3493 100644
--- a/drivers/hwmon/pmbus/acbel-fsg032.c
+++ b/drivers/hwmon/pmbus/acbel-fsg032.c
@@ -73,7 +73,7 @@ static struct i2c_driver acbel_fsg032_driver = {
.name = "acbel-fsg032",
.of_match_table = acbel_fsg032_of_match,
},
- .probe_new = acbel_fsg032_probe,
+ .probe = acbel_fsg032_probe,
.id_table = acbel_fsg032_id,
};
diff --git a/drivers/hwmon/pmbus/adm1266.c b/drivers/hwmon/pmbus/adm1266.c
index 1ac2b2f4c570..aba70330b319 100644
--- a/drivers/hwmon/pmbus/adm1266.c
+++ b/drivers/hwmon/pmbus/adm1266.c
@@ -502,7 +502,7 @@ static struct i2c_driver adm1266_driver = {
.name = "adm1266",
.of_match_table = adm1266_of_match,
},
- .probe_new = adm1266_probe,
+ .probe = adm1266_probe,
.id_table = adm1266_id,
};
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 3b07bfb43e93..b358d137c76f 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -832,7 +832,7 @@ static struct i2c_driver adm1275_driver = {
.driver = {
.name = "adm1275",
},
- .probe_new = adm1275_probe,
+ .probe = adm1275_probe,
.id_table = adm1275_id,
};
diff --git a/drivers/hwmon/pmbus/bel-pfe.c b/drivers/hwmon/pmbus/bel-pfe.c
index 4100eefb7ac3..fa5070ae26bc 100644
--- a/drivers/hwmon/pmbus/bel-pfe.c
+++ b/drivers/hwmon/pmbus/bel-pfe.c
@@ -120,7 +120,7 @@ static struct i2c_driver pfe_pmbus_driver = {
.driver = {
.name = "bel-pfe",
},
- .probe_new = pfe_pmbus_probe,
+ .probe = pfe_pmbus_probe,
.id_table = pfe_device_id,
};
diff --git a/drivers/hwmon/pmbus/bpa-rs600.c b/drivers/hwmon/pmbus/bpa-rs600.c
index f2d4e378a775..0dce26c35556 100644
--- a/drivers/hwmon/pmbus/bpa-rs600.c
+++ b/drivers/hwmon/pmbus/bpa-rs600.c
@@ -196,7 +196,7 @@ static struct i2c_driver bpa_rs600_driver = {
.name = "bpa-rs600",
.of_match_table = of_match_ptr(bpa_rs600_of_match),
},
- .probe_new = bpa_rs600_probe,
+ .probe = bpa_rs600_probe,
.id_table = bpa_rs600_id,
};
diff --git a/drivers/hwmon/pmbus/delta-ahe50dc-fan.c b/drivers/hwmon/pmbus/delta-ahe50dc-fan.c
index f546f0c12497..4dd3b6686d6a 100644
--- a/drivers/hwmon/pmbus/delta-ahe50dc-fan.c
+++ b/drivers/hwmon/pmbus/delta-ahe50dc-fan.c
@@ -119,7 +119,7 @@ static struct i2c_driver ahe50dc_fan_driver = {
.name = "ahe50dc_fan",
.of_match_table = of_match_ptr(ahe50dc_fan_of_match),
},
- .probe_new = ahe50dc_fan_probe,
+ .probe = ahe50dc_fan_probe,
.id_table = ahe50dc_fan_id,
};
module_i2c_driver(ahe50dc_fan_driver);
diff --git a/drivers/hwmon/pmbus/dps920ab.c b/drivers/hwmon/pmbus/dps920ab.c
index d3941f6eb29a..f7ff3e4650b7 100644
--- a/drivers/hwmon/pmbus/dps920ab.c
+++ b/drivers/hwmon/pmbus/dps920ab.c
@@ -195,7 +195,7 @@ static struct i2c_driver dps920ab_driver = {
.name = "dps920ab",
.of_match_table = of_match_ptr(dps920ab_of_match),
},
- .probe_new = dps920ab_probe,
+ .probe = dps920ab_probe,
};
module_i2c_driver(dps920ab_driver);
diff --git a/drivers/hwmon/pmbus/fsp-3y.c b/drivers/hwmon/pmbus/fsp-3y.c
index c7469d2cdedc..72a7c261ef06 100644
--- a/drivers/hwmon/pmbus/fsp-3y.c
+++ b/drivers/hwmon/pmbus/fsp-3y.c
@@ -282,7 +282,7 @@ static struct i2c_driver fsp3y_driver = {
.driver = {
.name = "fsp3y",
},
- .probe_new = fsp3y_probe,
+ .probe = fsp3y_probe,
.id_table = fsp3y_id
};
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index 76e72e9acda7..c791925b8907 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -605,7 +605,7 @@ static struct i2c_driver ibm_cffps_driver = {
.name = "ibm-cffps",
.of_match_table = ibm_cffps_of_match,
},
- .probe_new = ibm_cffps_probe,
+ .probe = ibm_cffps_probe,
.id_table = ibm_cffps_id,
};
diff --git a/drivers/hwmon/pmbus/inspur-ipsps.c b/drivers/hwmon/pmbus/inspur-ipsps.c
index 0f614e8d95f6..dfeae68b5e52 100644
--- a/drivers/hwmon/pmbus/inspur-ipsps.c
+++ b/drivers/hwmon/pmbus/inspur-ipsps.c
@@ -215,7 +215,7 @@ static struct i2c_driver ipsps_driver = {
.name = "inspur-ipsps",
.of_match_table = of_match_ptr(ipsps_of_match),
},
- .probe_new = ipsps_probe,
+ .probe = ipsps_probe,
.id_table = ipsps_id,
};
diff --git a/drivers/hwmon/pmbus/ir35221.c b/drivers/hwmon/pmbus/ir35221.c
index a6cf98e49666..e3ee5c1bd967 100644
--- a/drivers/hwmon/pmbus/ir35221.c
+++ b/drivers/hwmon/pmbus/ir35221.c
@@ -136,7 +136,7 @@ static struct i2c_driver ir35221_driver = {
.driver = {
.name = "ir35221",
},
- .probe_new = ir35221_probe,
+ .probe = ir35221_probe,
.id_table = ir35221_id,
};
diff --git a/drivers/hwmon/pmbus/ir36021.c b/drivers/hwmon/pmbus/ir36021.c
index 4dca4767f571..382ba6b6031a 100644
--- a/drivers/hwmon/pmbus/ir36021.c
+++ b/drivers/hwmon/pmbus/ir36021.c
@@ -68,7 +68,7 @@ static struct i2c_driver ir36021_driver = {
.name = "ir36021",
.of_match_table = of_match_ptr(ir36021_of_id),
},
- .probe_new = ir36021_probe,
+ .probe = ir36021_probe,
.id_table = ir36021_id,
};
diff --git a/drivers/hwmon/pmbus/ir38064.c b/drivers/hwmon/pmbus/ir38064.c
index 09276e397194..871c322d3d51 100644
--- a/drivers/hwmon/pmbus/ir38064.c
+++ b/drivers/hwmon/pmbus/ir38064.c
@@ -78,7 +78,7 @@ static struct i2c_driver ir38064_driver = {
.name = "ir38064",
.of_match_table = of_match_ptr(ir38064_of_match),
},
- .probe_new = ir38064_probe,
+ .probe = ir38064_probe,
.id_table = ir38064_id,
};
diff --git a/drivers/hwmon/pmbus/irps5401.c b/drivers/hwmon/pmbus/irps5401.c
index de3449e4d77a..146d32a35a7c 100644
--- a/drivers/hwmon/pmbus/irps5401.c
+++ b/drivers/hwmon/pmbus/irps5401.c
@@ -54,7 +54,7 @@ static struct i2c_driver irps5401_driver = {
.driver = {
.name = "irps5401",
},
- .probe_new = irps5401_probe,
+ .probe = irps5401_probe,
.id_table = irps5401_id,
};
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
index 1a8caff1ac5f..7e53fb1d5ea3 100644
--- a/drivers/hwmon/pmbus/isl68137.c
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -323,7 +323,7 @@ static struct i2c_driver isl68137_driver = {
.driver = {
.name = "isl68137",
},
- .probe_new = isl68137_probe,
+ .probe = isl68137_probe,
.id_table = raa_dmpvr_id,
};
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 09792cd03d9f..929fa6d34efd 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -568,7 +568,7 @@ static struct i2c_driver lm25066_driver = {
.name = "lm25066",
.of_match_table = of_match_ptr(lm25066_of_match),
},
- .probe_new = lm25066_probe,
+ .probe = lm25066_probe,
.id_table = lm25066_id,
};
diff --git a/drivers/hwmon/pmbus/lt7182s.c b/drivers/hwmon/pmbus/lt7182s.c
index 4cfe476fc92d..28afc5f15ae8 100644
--- a/drivers/hwmon/pmbus/lt7182s.c
+++ b/drivers/hwmon/pmbus/lt7182s.c
@@ -183,7 +183,7 @@ static struct i2c_driver lt7182s_driver = {
.name = "lt7182s",
.of_match_table = of_match_ptr(lt7182s_of_match),
},
- .probe_new = lt7182s_probe,
+ .probe = lt7182s_probe,
.id_table = lt7182s_id,
};
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 91df8e895147..73a86f4d6472 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -927,7 +927,7 @@ static struct i2c_driver ltc2978_driver = {
.name = "ltc2978",
.of_match_table = of_match_ptr(ltc2978_of_match),
},
- .probe_new = ltc2978_probe,
+ .probe = ltc2978_probe,
.id_table = ltc2978_id,
};
diff --git a/drivers/hwmon/pmbus/ltc3815.c b/drivers/hwmon/pmbus/ltc3815.c
index 8e13a7ddcb42..f2023b17aa8d 100644
--- a/drivers/hwmon/pmbus/ltc3815.c
+++ b/drivers/hwmon/pmbus/ltc3815.c
@@ -199,7 +199,7 @@ static struct i2c_driver ltc3815_driver = {
.driver = {
.name = "ltc3815",
},
- .probe_new = ltc3815_probe,
+ .probe = ltc3815_probe,
.id_table = ltc3815_id,
};
diff --git a/drivers/hwmon/pmbus/max15301.c b/drivers/hwmon/pmbus/max15301.c
index 0b6f88428ea8..2cfaa62aedd6 100644
--- a/drivers/hwmon/pmbus/max15301.c
+++ b/drivers/hwmon/pmbus/max15301.c
@@ -178,7 +178,7 @@ static struct i2c_driver max15301_driver = {
.driver = {
.name = "max15301",
},
- .probe_new = max15301_probe,
+ .probe = max15301_probe,
.id_table = max15301_id,
};
diff --git a/drivers/hwmon/pmbus/max16064.c b/drivers/hwmon/pmbus/max16064.c
index 94f869039071..a573a0ab9e48 100644
--- a/drivers/hwmon/pmbus/max16064.c
+++ b/drivers/hwmon/pmbus/max16064.c
@@ -102,7 +102,7 @@ static struct i2c_driver max16064_driver = {
.driver = {
.name = "max16064",
},
- .probe_new = max16064_probe,
+ .probe = max16064_probe,
.id_table = max16064_id,
};
diff --git a/drivers/hwmon/pmbus/max16601.c b/drivers/hwmon/pmbus/max16601.c
index 6724f723f74c..9b0177409109 100644
--- a/drivers/hwmon/pmbus/max16601.c
+++ b/drivers/hwmon/pmbus/max16601.c
@@ -357,7 +357,7 @@ static struct i2c_driver max16601_driver = {
.driver = {
.name = "max16601",
},
- .probe_new = max16601_probe,
+ .probe = max16601_probe,
.id_table = max16601_id,
};
diff --git a/drivers/hwmon/pmbus/max20730.c b/drivers/hwmon/pmbus/max20730.c
index ba39f03c6374..7bcf27995033 100644
--- a/drivers/hwmon/pmbus/max20730.c
+++ b/drivers/hwmon/pmbus/max20730.c
@@ -776,7 +776,7 @@ static struct i2c_driver max20730_driver = {
.name = "max20730",
.of_match_table = max20730_of_match,
},
- .probe_new = max20730_probe,
+ .probe = max20730_probe,
.id_table = max20730_id,
};
diff --git a/drivers/hwmon/pmbus/max20751.c b/drivers/hwmon/pmbus/max20751.c
index 2272dc8c2e38..6ebd71cd081b 100644
--- a/drivers/hwmon/pmbus/max20751.c
+++ b/drivers/hwmon/pmbus/max20751.c
@@ -42,7 +42,7 @@ static struct i2c_driver max20751_driver = {
.driver = {
.name = "max20751",
},
- .probe_new = max20751_probe,
+ .probe = max20751_probe,
.id_table = max20751_id,
};
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
index 95d79a64b483..f9aa576495a5 100644
--- a/drivers/hwmon/pmbus/max31785.c
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -394,7 +394,7 @@ static struct i2c_driver max31785_driver = {
.name = "max31785",
.of_match_table = max31785_of_match,
},
- .probe_new = max31785_probe,
+ .probe = max31785_probe,
.id_table = max31785_id,
};
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index ea7609058a12..fe7f6b1b0985 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -520,7 +520,7 @@ static struct i2c_driver max34440_driver = {
.driver = {
.name = "max34440",
},
- .probe_new = max34440_probe,
+ .probe = max34440_probe,
.id_table = max34440_id,
};
diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c
index 5e66c28c0b71..ae8573fdf5ba 100644
--- a/drivers/hwmon/pmbus/max8688.c
+++ b/drivers/hwmon/pmbus/max8688.c
@@ -182,7 +182,7 @@ static struct i2c_driver max8688_driver = {
.driver = {
.name = "max8688",
},
- .probe_new = max8688_probe,
+ .probe = max8688_probe,
.id_table = max8688_id,
};
diff --git a/drivers/hwmon/pmbus/mp2888.c b/drivers/hwmon/pmbus/mp2888.c
index 24e5194706cf..50662ed8e3d5 100644
--- a/drivers/hwmon/pmbus/mp2888.c
+++ b/drivers/hwmon/pmbus/mp2888.c
@@ -395,7 +395,7 @@ static struct i2c_driver mp2888_driver = {
.name = "mp2888",
.of_match_table = of_match_ptr(mp2888_of_match),
},
- .probe_new = mp2888_probe,
+ .probe = mp2888_probe,
.id_table = mp2888_id,
};
diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
index 51986adfbf47..2109b0458a8b 100644
--- a/drivers/hwmon/pmbus/mp2975.c
+++ b/drivers/hwmon/pmbus/mp2975.c
@@ -757,7 +757,7 @@ static struct i2c_driver mp2975_driver = {
.name = "mp2975",
.of_match_table = of_match_ptr(mp2975_of_match),
},
- .probe_new = mp2975_probe,
+ .probe = mp2975_probe,
.id_table = mp2975_id,
};
diff --git a/drivers/hwmon/pmbus/mp5023.c b/drivers/hwmon/pmbus/mp5023.c
index 791a06c3c54a..c4c4324d2b74 100644
--- a/drivers/hwmon/pmbus/mp5023.c
+++ b/drivers/hwmon/pmbus/mp5023.c
@@ -56,7 +56,7 @@ static struct i2c_driver mp5023_driver = {
.name = "mp5023",
.of_match_table = of_match_ptr(mp5023_of_match),
},
- .probe_new = mp5023_probe,
+ .probe = mp5023_probe,
};
module_i2c_driver(mp5023_driver);
diff --git a/drivers/hwmon/pmbus/mpq7932.c b/drivers/hwmon/pmbus/mpq7932.c
index ff939881dc3b..865d42edda1a 100644
--- a/drivers/hwmon/pmbus/mpq7932.c
+++ b/drivers/hwmon/pmbus/mpq7932.c
@@ -145,7 +145,7 @@ static struct i2c_driver mpq7932_regulator_driver = {
.name = "mpq7932",
.of_match_table = mpq7932_of_match,
},
- .probe_new = mpq7932_probe,
+ .probe = mpq7932_probe,
.id_table = mpq7932_id,
};
module_i2c_driver(mpq7932_regulator_driver);
diff --git a/drivers/hwmon/pmbus/pim4328.c b/drivers/hwmon/pmbus/pim4328.c
index 273ff6e57654..31d9ae06379a 100644
--- a/drivers/hwmon/pmbus/pim4328.c
+++ b/drivers/hwmon/pmbus/pim4328.c
@@ -221,7 +221,7 @@ static struct i2c_driver pim4328_driver = {
.driver = {
.name = "pim4328",
},
- .probe_new = pim4328_probe,
+ .probe = pim4328_probe,
.id_table = pim4328_id,
};
diff --git a/drivers/hwmon/pmbus/pli1209bc.c b/drivers/hwmon/pmbus/pli1209bc.c
index 05b4ee35ba27..7d8bd3167b21 100644
--- a/drivers/hwmon/pmbus/pli1209bc.c
+++ b/drivers/hwmon/pmbus/pli1209bc.c
@@ -134,7 +134,7 @@ static struct i2c_driver pli1209bc_driver = {
.name = "pli1209bc",
.of_match_table = of_match_ptr(pli1209bc_of_match),
},
- .probe_new = pli1209bc_probe,
+ .probe = pli1209bc_probe,
.id_table = pli1209bc_id,
};
diff --git a/drivers/hwmon/pmbus/pm6764tr.c b/drivers/hwmon/pmbus/pm6764tr.c
index e0bbc8a30d21..2a16504c85b7 100644
--- a/drivers/hwmon/pmbus/pm6764tr.c
+++ b/drivers/hwmon/pmbus/pm6764tr.c
@@ -64,7 +64,7 @@ static struct i2c_driver pm6764tr_driver = {
.name = "pm6764tr",
.of_match_table = of_match_ptr(pm6764tr_of_match),
},
- .probe_new = pm6764tr_probe,
+ .probe = pm6764tr_probe,
.id_table = pm6764tr_id,
};
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index d0d386990af5..ec40c5c59954 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -252,7 +252,7 @@ static struct i2c_driver pmbus_driver = {
.driver = {
.name = "pmbus",
},
- .probe_new = pmbus_probe,
+ .probe = pmbus_probe,
.id_table = pmbus_id,
};
diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c
index 52bee5de2988..e2790a682dc8 100644
--- a/drivers/hwmon/pmbus/pxe1610.c
+++ b/drivers/hwmon/pmbus/pxe1610.c
@@ -139,7 +139,7 @@ static struct i2c_driver pxe1610_driver = {
.driver = {
.name = "pxe1610",
},
- .probe_new = pxe1610_probe,
+ .probe = pxe1610_probe,
.id_table = pxe1610_id,
};
diff --git a/drivers/hwmon/pmbus/q54sj108a2.c b/drivers/hwmon/pmbus/q54sj108a2.c
index d3ba12951324..b830f3b02bcc 100644
--- a/drivers/hwmon/pmbus/q54sj108a2.c
+++ b/drivers/hwmon/pmbus/q54sj108a2.c
@@ -412,7 +412,7 @@ static struct i2c_driver q54sj108a2_driver = {
.name = "q54sj108a2",
.of_match_table = q54sj108a2_of_match,
},
- .probe_new = q54sj108a2_probe,
+ .probe = q54sj108a2_probe,
.id_table = q54sj108a2_id,
};
diff --git a/drivers/hwmon/pmbus/stpddc60.c b/drivers/hwmon/pmbus/stpddc60.c
index 357b9d9d896b..be9076dcc2db 100644
--- a/drivers/hwmon/pmbus/stpddc60.c
+++ b/drivers/hwmon/pmbus/stpddc60.c
@@ -237,7 +237,7 @@ static struct i2c_driver stpddc60_driver = {
.driver = {
.name = "stpddc60",
},
- .probe_new = stpddc60_probe,
+ .probe = stpddc60_probe,
.id_table = stpddc60_id,
};
diff --git a/drivers/hwmon/pmbus/tda38640.c b/drivers/hwmon/pmbus/tda38640.c
index c3e781319cd1..450b0273fb59 100644
--- a/drivers/hwmon/pmbus/tda38640.c
+++ b/drivers/hwmon/pmbus/tda38640.c
@@ -62,7 +62,7 @@ static struct i2c_driver tda38640_driver = {
.name = "tda38640",
.of_match_table = of_match_ptr(tda38640_of_match),
},
- .probe_new = tda38640_probe,
+ .probe = tda38640_probe,
.id_table = tda38640_id,
};
diff --git a/drivers/hwmon/pmbus/tps40422.c b/drivers/hwmon/pmbus/tps40422.c
index 31bb83c0ef3e..ea0074a6b9fc 100644
--- a/drivers/hwmon/pmbus/tps40422.c
+++ b/drivers/hwmon/pmbus/tps40422.c
@@ -42,7 +42,7 @@ static struct i2c_driver tps40422_driver = {
.driver = {
.name = "tps40422",
},
- .probe_new = tps40422_probe,
+ .probe = tps40422_probe,
.id_table = tps40422_id,
};
diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c
index 81b9d813655a..ef99005a3af5 100644
--- a/drivers/hwmon/pmbus/tps53679.c
+++ b/drivers/hwmon/pmbus/tps53679.c
@@ -299,7 +299,7 @@ static struct i2c_driver tps53679_driver = {
.name = "tps53679",
.of_match_table = of_match_ptr(tps53679_of_match),
},
- .probe_new = tps53679_probe,
+ .probe = tps53679_probe,
.id_table = tps53679_id,
};
diff --git a/drivers/hwmon/pmbus/tps546d24.c b/drivers/hwmon/pmbus/tps546d24.c
index 435f94304ad8..69bbdb6c680b 100644
--- a/drivers/hwmon/pmbus/tps546d24.c
+++ b/drivers/hwmon/pmbus/tps546d24.c
@@ -59,7 +59,7 @@ static struct i2c_driver tps546d24_driver = {
.name = "tps546d24",
.of_match_table = of_match_ptr(tps546d24_of_match),
},
- .probe_new = tps546d24_probe,
+ .probe = tps546d24_probe,
.id_table = tps546d24_id,
};
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 3daaf2237832..ba712ff7666f 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -695,7 +695,7 @@ static struct i2c_driver ucd9000_driver = {
.name = "ucd9000",
.of_match_table = of_match_ptr(ucd9000_of_match),
},
- .probe_new = ucd9000_probe,
+ .probe = ucd9000_probe,
.id_table = ucd9000_id,
};
diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index 3ad375a76f3e..a82847945508 100644
--- a/drivers/hwmon/pmbus/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
@@ -200,7 +200,7 @@ static struct i2c_driver ucd9200_driver = {
.name = "ucd9200",
.of_match_table = of_match_ptr(ucd9200_of_match),
},
- .probe_new = ucd9200_probe,
+ .probe = ucd9200_probe,
.id_table = ucd9200_id,
};
diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c
index 32bc7736d609..37d08dd427d5 100644
--- a/drivers/hwmon/pmbus/xdpe12284.c
+++ b/drivers/hwmon/pmbus/xdpe12284.c
@@ -185,7 +185,7 @@ static struct i2c_driver xdpe122_driver = {
.name = "xdpe12284",
.of_match_table = of_match_ptr(xdpe122_of_match),
},
- .probe_new = xdpe122_probe,
+ .probe = xdpe122_probe,
.id_table = xdpe122_id,
};
diff --git a/drivers/hwmon/pmbus/xdpe152c4.c b/drivers/hwmon/pmbus/xdpe152c4.c
index b8a36ef73e45..235e6b41ae4c 100644
--- a/drivers/hwmon/pmbus/xdpe152c4.c
+++ b/drivers/hwmon/pmbus/xdpe152c4.c
@@ -63,7 +63,7 @@ static struct i2c_driver xdpe152_driver = {
.name = "xdpe152c4",
.of_match_table = of_match_ptr(xdpe152_of_match),
},
- .probe_new = xdpe152_probe,
+ .probe = xdpe152_probe,
.id_table = xdpe152_id,
};
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index e9df0c56d91e..83458df0d0cf 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -461,7 +461,7 @@ static struct i2c_driver zl6100_driver = {
.driver = {
.name = "zl6100",
},
- .probe_new = zl6100_probe,
+ .probe = zl6100_probe,
.id_table = zl6100_id,
};
diff --git a/drivers/hwmon/powr1220.c b/drivers/hwmon/powr1220.c
index 9cb0c2de5219..4120cadb00ae 100644
--- a/drivers/hwmon/powr1220.c
+++ b/drivers/hwmon/powr1220.c
@@ -327,7 +327,7 @@ static struct i2c_driver powr1220_driver = {
.driver = {
.name = "powr1220",
},
- .probe_new = powr1220_probe,
+ .probe = powr1220_probe,
.id_table = powr1220_ids,
};
diff --git a/drivers/hwmon/sbrmi.c b/drivers/hwmon/sbrmi.c
index 529f0e766319..484703f0ea5f 100644
--- a/drivers/hwmon/sbrmi.c
+++ b/drivers/hwmon/sbrmi.c
@@ -347,7 +347,7 @@ static struct i2c_driver sbrmi_driver = {
.name = "sbrmi",
.of_match_table = of_match_ptr(sbrmi_of_match),
},
- .probe_new = sbrmi_probe,
+ .probe = sbrmi_probe,
.id_table = sbrmi_id,
};
diff --git a/drivers/hwmon/sbtsi_temp.c b/drivers/hwmon/sbtsi_temp.c
index 7049d9464ac6..b79cece4ac9a 100644
--- a/drivers/hwmon/sbtsi_temp.c
+++ b/drivers/hwmon/sbtsi_temp.c
@@ -238,7 +238,7 @@ static struct i2c_driver sbtsi_driver = {
.name = "sbtsi",
.of_match_table = of_match_ptr(sbtsi_of_match),
},
- .probe_new = sbtsi_probe,
+ .probe = sbtsi_probe,
.id_table = sbtsi_id,
};
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index f50b90198f23..55c179475208 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -285,7 +285,7 @@ MODULE_DEVICE_TABLE(i2c, sht21_id);
static struct i2c_driver sht21_driver = {
.driver.name = "sht21",
- .probe_new = sht21_probe,
+ .probe = sht21_probe,
.id_table = sht21_id,
};
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index 8305e44d9ab2..1dab3002728b 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -743,7 +743,7 @@ MODULE_DEVICE_TABLE(i2c, sht3x_ids);
static struct i2c_driver sht3x_i2c_driver = {
.driver.name = "sht3x",
- .probe_new = sht3x_probe,
+ .probe = sht3x_probe,
.id_table = sht3x_ids,
};
diff --git a/drivers/hwmon/sht4x.c b/drivers/hwmon/sht4x.c
index 5bbe09135ab9..7ee797410458 100644
--- a/drivers/hwmon/sht4x.c
+++ b/drivers/hwmon/sht4x.c
@@ -291,7 +291,7 @@ static struct i2c_driver sht4x_driver = {
.name = "sht4x",
.of_match_table = sht4x_of_match,
},
- .probe_new = sht4x_probe,
+ .probe = sht4x_probe,
.id_table = sht4x_id,
};
diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c
index 18546ebc8e9f..1f96e94967ee 100644
--- a/drivers/hwmon/shtc1.c
+++ b/drivers/hwmon/shtc1.c
@@ -279,7 +279,7 @@ static struct i2c_driver shtc1_i2c_driver = {
.name = "shtc1",
.of_match_table = shtc1_of_match,
},
- .probe_new = shtc1_probe,
+ .probe = shtc1_probe,
.id_table = shtc1_id,
};
diff --git a/drivers/hwmon/smm665.c b/drivers/hwmon/smm665.c
index c36bdbe423de..026c76f8c22e 100644
--- a/drivers/hwmon/smm665.c
+++ b/drivers/hwmon/smm665.c
@@ -694,7 +694,7 @@ static struct i2c_driver smm665_driver = {
.driver = {
.name = "smm665",
},
- .probe_new = smm665_probe,
+ .probe = smm665_probe,
.remove = smm665_remove,
.id_table = smm665_id,
};
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 70d2152234e2..d20800a1f02b 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -628,7 +628,7 @@ static struct i2c_driver smsc47m192_driver = {
.driver = {
.name = "smsc47m192",
},
- .probe_new = smsc47m192_probe,
+ .probe = smsc47m192_probe,
.id_table = smsc47m192_id,
.detect = smsc47m192_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
index 2f67c6747ead..847c99376930 100644
--- a/drivers/hwmon/stts751.c
+++ b/drivers/hwmon/stts751.c
@@ -821,7 +821,7 @@ static struct i2c_driver stts751_driver = {
.name = DEVNAME,
.of_match_table = of_match_ptr(stts751_of_match),
},
- .probe_new = stts751_probe,
+ .probe = stts751_probe,
.id_table = stts751_id,
.detect = stts751_detect,
.alert = stts751_alert,
diff --git a/drivers/hwmon/tc654.c b/drivers/hwmon/tc654.c
index 54cd33d09688..42a9658f1bc2 100644
--- a/drivers/hwmon/tc654.c
+++ b/drivers/hwmon/tc654.c
@@ -561,7 +561,7 @@ static struct i2c_driver tc654_driver = {
.driver = {
.name = "tc654",
},
- .probe_new = tc654_probe,
+ .probe = tc654_probe,
.id_table = tc654_id,
};
diff --git a/drivers/hwmon/tc74.c b/drivers/hwmon/tc74.c
index ace55da97fc2..03950670bd78 100644
--- a/drivers/hwmon/tc74.c
+++ b/drivers/hwmon/tc74.c
@@ -160,7 +160,7 @@ static struct i2c_driver tc74_driver = {
.driver = {
.name = "tc74",
},
- .probe_new = tc74_probe,
+ .probe = tc74_probe,
.id_table = tc74_id,
};
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 81cdb012993c..68ba26bc9014 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -420,7 +420,7 @@ static struct i2c_driver thmc50_driver = {
.driver = {
.name = "thmc50",
},
- .probe_new = thmc50_probe,
+ .probe = thmc50_probe,
.id_table = thmc50_id,
.detect = thmc50_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index e271556efe0b..6359d12b2420 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -301,7 +301,7 @@ static struct i2c_driver tmp102_driver = {
.driver.name = DRIVER_NAME,
.driver.of_match_table = of_match_ptr(tmp102_of_match),
.driver.pm = pm_sleep_ptr(&tmp102_dev_pm_ops),
- .probe_new = tmp102_probe,
+ .probe = tmp102_probe,
.id_table = tmp102_id,
};
diff --git a/drivers/hwmon/tmp103.c b/drivers/hwmon/tmp103.c
index d257bb91fc69..a84c29a3a765 100644
--- a/drivers/hwmon/tmp103.c
+++ b/drivers/hwmon/tmp103.c
@@ -214,7 +214,7 @@ static struct i2c_driver tmp103_driver = {
.of_match_table = of_match_ptr(tmp103_of_match),
.pm = pm_sleep_ptr(&tmp103_dev_pm_ops),
},
- .probe_new = tmp103_probe,
+ .probe = tmp103_probe,
.id_table = tmp103_id,
};
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
index 43784c289a9e..300b24d5586b 100644
--- a/drivers/hwmon/tmp108.c
+++ b/drivers/hwmon/tmp108.c
@@ -432,7 +432,7 @@ static struct i2c_driver tmp108_driver = {
.pm = pm_sleep_ptr(&tmp108_dev_pm_ops),
.of_match_table = of_match_ptr(tmp108_of_ids),
},
- .probe_new = tmp108_probe,
+ .probe = tmp108_probe,
.id_table = tmp108_i2c_ids,
};
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index f358ba679626..91f2314568cf 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -766,7 +766,7 @@ static struct i2c_driver tmp401_driver = {
.name = "tmp401",
.of_match_table = of_match_ptr(tmp4xx_of_match),
},
- .probe_new = tmp401_probe,
+ .probe = tmp401_probe,
.id_table = tmp401_id,
.detect = tmp401_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 45fd7fb5ee01..3cde3916ab6d 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -487,7 +487,7 @@ static struct i2c_driver tmp421_driver = {
.name = "tmp421",
.of_match_table = of_match_ptr(tmp421_of_match),
},
- .probe_new = tmp421_probe,
+ .probe = tmp421_probe,
.id_table = tmp421_id,
.detect = tmp421_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/tmp464.c b/drivers/hwmon/tmp464.c
index 9213a493a590..8a88032d8e9d 100644
--- a/drivers/hwmon/tmp464.c
+++ b/drivers/hwmon/tmp464.c
@@ -699,7 +699,7 @@ static struct i2c_driver tmp464_driver = {
.name = "tmp464",
.of_match_table = of_match_ptr(tmp464_of_match),
},
- .probe_new = tmp464_probe,
+ .probe = tmp464_probe,
.id_table = tmp464_id,
.detect = tmp464_detect,
.address_list = normal_i2c,
diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
index 0693eaee054f..bff10f4b56e1 100644
--- a/drivers/hwmon/tmp513.c
+++ b/drivers/hwmon/tmp513.c
@@ -760,7 +760,7 @@ static struct i2c_driver tmp51x_driver = {
.name = "tmp51x",
.of_match_table = tmp51x_of_match,
},
- .probe_new = tmp51x_probe,
+ .probe = tmp51x_probe,
.id_table = tmp51x_id,
};
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
index 85a75f551148..8fbbb29ae11d 100644
--- a/drivers/hwmon/tps23861.c
+++ b/drivers/hwmon/tps23861.c
@@ -581,7 +581,7 @@ static const struct of_device_id __maybe_unused tps23861_of_match[] = {
MODULE_DEVICE_TABLE(of, tps23861_of_match);
static struct i2c_driver tps23861_driver = {
- .probe_new = tps23861_probe,
+ .probe = tps23861_probe,
.remove = tps23861_remove,
.driver = {
.name = "tps23861",
diff --git a/drivers/hwmon/w83773g.c b/drivers/hwmon/w83773g.c
index 8dbcd05abd9a..7f3615f5587c 100644
--- a/drivers/hwmon/w83773g.c
+++ b/drivers/hwmon/w83773g.c
@@ -295,7 +295,7 @@ static struct i2c_driver w83773_driver = {
.name = "w83773g",
.of_match_table = of_match_ptr(w83773_of_match),
},
- .probe_new = w83773_probe,
+ .probe = w83773_probe,
.id_table = w83773_id,
};
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index dacabf25e83f..b33f382f238d 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1585,7 +1585,7 @@ static struct i2c_driver w83781d_driver = {
.name = "w83781d",
.of_match_table = w83781d_of_match,
},
- .probe_new = w83781d_probe,
+ .probe = w83781d_probe,
.remove = w83781d_remove,
.id_table = w83781d_ids,
.detect = w83781d_detect,
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index eaf691365023..9681eaa06c8e 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -338,7 +338,7 @@ static struct i2c_driver w83791d_driver = {
.driver = {
.name = "w83791d",
},
- .probe_new = w83791d_probe,
+ .probe = w83791d_probe,
.remove = w83791d_remove,
.id_table = w83791d_id,
.detect = w83791d_detect,
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 6d160eee1446..69ce379a9e13 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -306,7 +306,7 @@ static struct i2c_driver w83792d_driver = {
.driver = {
.name = "w83792d",
},
- .probe_new = w83792d_probe,
+ .probe = w83792d_probe,
.remove = w83792d_remove,
.id_table = w83792d_id,
.detect = w83792d_detect,
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index a4926d907198..96bab94ba899 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -301,7 +301,7 @@ static struct i2c_driver w83793_driver = {
.driver = {
.name = "w83793",
},
- .probe_new = w83793_probe,
+ .probe = w83793_probe,
.remove = w83793_remove,
.id_table = w83793_id,
.detect = w83793_detect,
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 84ff5c57e98c..c446e00db658 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -2255,7 +2255,7 @@ static struct i2c_driver w83795_driver = {
.driver = {
.name = "w83795",
},
- .probe_new = w83795_probe,
+ .probe = w83795_probe,
.remove = w83795_remove,
.id_table = w83795_id,
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index f3622de0d96f..9c11ed69c055 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -84,7 +84,7 @@ static struct i2c_driver w83l785ts_driver = {
.driver = {
.name = "w83l785ts",
},
- .probe_new = w83l785ts_probe,
+ .probe = w83l785ts_probe,
.remove = w83l785ts_remove,
.id_table = w83l785ts_id,
.detect = w83l785ts_detect,
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 5597e1c2d95c..75874cf7851c 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -751,7 +751,7 @@ static struct i2c_driver w83l786ng_driver = {
.driver = {
.name = "w83l786ng",
},
- .probe_new = w83l786ng_probe,
+ .probe = w83l786ng_probe,
.id_table = w83l786ng_id,
.detect = w83l786ng_detect,
.address_list = normal_i2c,
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 5f7d81b44b1d..282e539157f8 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -1291,12 +1291,12 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
adev = ACPI_COMPANION(indio_dev->dev.parent);
if (!adev)
- return 0;
+ return -ENXIO;
/* Read _ONT data, which should be a package of 6 integers. */
status = acpi_evaluate_object(adev->handle, "_ONT", NULL, &buffer);
if (status == AE_NOT_FOUND) {
- return 0;
+ return -ENXIO;
} else if (ACPI_FAILURE(status)) {
dev_warn(&indio_dev->dev, "failed to execute _ONT: %d\n",
status);
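Returning -ENXIO instead of 0 when there is no ACPI companion or no _ONT package makes "no ACPI orientation data" visible to the caller, which can then fall back to another mount-matrix source instead of treating the absence as success. A hedged sketch of the caller-side pattern this enables (not the literal caller code):

ret = apply_acpi_orientation(indio_dev);
if (ret)        /* no _ONT data: fall back, e.g. to firmware properties */
        ret = iio_read_mount_matrix(parent, &adata->mount_matrix);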
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
index 38394341fd6e..5a5dd5e87ffc 100644
--- a/drivers/iio/adc/ad4130.c
+++ b/drivers/iio/adc/ad4130.c
@@ -1817,6 +1817,11 @@ static const struct clk_ops ad4130_int_clk_ops = {
.unprepare = ad4130_int_clk_unprepare,
};
+static void ad4130_clk_del_provider(void *of_node)
+{
+ of_clk_del_provider(of_node);
+}
+
static int ad4130_setup_int_clk(struct ad4130_state *st)
{
struct device *dev = &st->spi->dev;
@@ -1824,6 +1829,7 @@ static int ad4130_setup_int_clk(struct ad4130_state *st)
struct clk_init_data init;
const char *clk_name;
struct clk *clk;
+ int ret;
if (st->int_pin_sel == AD4130_INT_PIN_CLK ||
st->mclk_sel != AD4130_MCLK_76_8KHZ)
@@ -1843,7 +1849,11 @@ static int ad4130_setup_int_clk(struct ad4130_state *st)
if (IS_ERR(clk))
return PTR_ERR(clk);
- return of_clk_add_provider(of_node, of_clk_src_simple_get, clk);
+ ret = of_clk_add_provider(of_node, of_clk_src_simple_get, clk);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, ad4130_clk_del_provider, of_node);
}
static int ad4130_setup(struct iio_dev *indio_dev)
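The new ad4130_clk_del_provider() callback exists purely so the provider registered here is undone automatically: devm_add_action_or_reset() runs it at driver unbind, and also runs it immediately if registering the action itself fails, so no explicit error path is needed. The general idiom, sketched:

static void undo_provider(void *data)
{
        of_clk_del_provider(data);      /* data is the of_node passed below */
}

ret = of_clk_add_provider(of_node, of_clk_src_simple_get, clk);
if (ret)
        return ret;

/* on allocation failure this calls undo_provider(of_node) itself */
return devm_add_action_or_reset(dev, undo_provider, of_node);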
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 55a6ab591016..99bb604b78c8 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -897,10 +897,6 @@ static const struct iio_info ad7195_info = {
__AD719x_CHANNEL(_si, _channel1, -1, _address, NULL, IIO_VOLTAGE, \
BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info)
-#define AD719x_SHORTED_CHANNEL(_si, _channel1, _address) \
- __AD719x_CHANNEL(_si, _channel1, -1, _address, "shorted", IIO_VOLTAGE, \
- BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info)
-
#define AD719x_TEMP_CHANNEL(_si, _address) \
__AD719x_CHANNEL(_si, 0, -1, _address, NULL, IIO_TEMP, 0, NULL)
@@ -908,7 +904,7 @@ static const struct iio_chan_spec ad7192_channels[] = {
AD719x_DIFF_CHANNEL(0, 1, 2, AD7192_CH_AIN1P_AIN2M),
AD719x_DIFF_CHANNEL(1, 3, 4, AD7192_CH_AIN3P_AIN4M),
AD719x_TEMP_CHANNEL(2, AD7192_CH_TEMP),
- AD719x_SHORTED_CHANNEL(3, 2, AD7192_CH_AIN2P_AIN2M),
+ AD719x_DIFF_CHANNEL(3, 2, 2, AD7192_CH_AIN2P_AIN2M),
AD719x_CHANNEL(4, 1, AD7192_CH_AIN1),
AD719x_CHANNEL(5, 2, AD7192_CH_AIN2),
AD719x_CHANNEL(6, 3, AD7192_CH_AIN3),
@@ -922,7 +918,7 @@ static const struct iio_chan_spec ad7193_channels[] = {
AD719x_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M),
AD719x_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M),
AD719x_TEMP_CHANNEL(4, AD7193_CH_TEMP),
- AD719x_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M),
+ AD719x_DIFF_CHANNEL(5, 2, 2, AD7193_CH_AIN2P_AIN2M),
AD719x_CHANNEL(6, 1, AD7193_CH_AIN1),
AD719x_CHANNEL(7, 2, AD7193_CH_AIN2),
AD719x_CHANNEL(8, 3, AD7193_CH_AIN3),
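Dropping AD719x_SHORTED_CHANNEL and describing AIN2(+)-AIN2(-) as an ordinary differential pair with both inputs set to 2 changes the sysfs naming: assuming the usual IIO conventions, the channel moves from an extend_name form to a regular differential entry, roughly:

in_voltage2-voltage2_raw        /* was in_voltage2_shorted_raw */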
diff --git a/drivers/iio/adc/imx93_adc.c b/drivers/iio/adc/imx93_adc.c
index a775d2e40567..dce9ec91e4a7 100644
--- a/drivers/iio/adc/imx93_adc.c
+++ b/drivers/iio/adc/imx93_adc.c
@@ -236,8 +236,7 @@ static int imx93_adc_read_raw(struct iio_dev *indio_dev,
{
struct imx93_adc *adc = iio_priv(indio_dev);
struct device *dev = adc->dev;
- long ret;
- u32 vref_uv;
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
@@ -253,10 +252,10 @@ static int imx93_adc_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- ret = vref_uv = regulator_get_voltage(adc->vref);
+ ret = regulator_get_voltage(adc->vref);
if (ret < 0)
return ret;
- *val = vref_uv / 1000;
+ *val = ret / 1000;
*val2 = 12;
return IIO_VAL_FRACTIONAL_LOG2;
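With IIO_VAL_FRACTIONAL_LOG2 the reported scale is val / 2^val2, so returning the regulator voltage in millivolts together with val2 = 12 gives millivolts per LSB of the 12-bit result; the temporary vref_uv variable added nothing. Worked example, assuming a 1.8 V reference:

*val  = 1800000 / 1000;         /* regulator_get_voltage() in uV -> 1800 mV */
*val2 = 12;
/* scale = 1800 / 4096 ~= 0.4395 mV per LSB */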
diff --git a/drivers/iio/adc/mt6370-adc.c b/drivers/iio/adc/mt6370-adc.c
index bc62e5a9d50d..0bc112135bca 100644
--- a/drivers/iio/adc/mt6370-adc.c
+++ b/drivers/iio/adc/mt6370-adc.c
@@ -19,6 +19,7 @@
#include <dt-bindings/iio/adc/mediatek,mt6370_adc.h>
+#define MT6370_REG_DEV_INFO 0x100
#define MT6370_REG_CHG_CTRL3 0x113
#define MT6370_REG_CHG_CTRL7 0x117
#define MT6370_REG_CHG_ADC 0x121
@@ -27,6 +28,7 @@
#define MT6370_ADC_START_MASK BIT(0)
#define MT6370_ADC_IN_SEL_MASK GENMASK(7, 4)
#define MT6370_AICR_ICHG_MASK GENMASK(7, 2)
+#define MT6370_VENID_MASK GENMASK(7, 4)
#define MT6370_AICR_100_mA 0x0
#define MT6370_AICR_150_mA 0x1
@@ -47,6 +49,10 @@
#define ADC_CONV_TIME_MS 35
#define ADC_CONV_POLLING_TIME_US 1000
+#define MT6370_VID_RT5081 0x8
+#define MT6370_VID_RT5081A 0xA
+#define MT6370_VID_MT6370 0xE
+
struct mt6370_adc_data {
struct device *dev;
struct regmap *regmap;
@@ -55,6 +61,7 @@ struct mt6370_adc_data {
* from being read at the same time.
*/
struct mutex adc_lock;
+ unsigned int vid;
};
static int mt6370_adc_read_channel(struct mt6370_adc_data *priv, int chan,
@@ -98,6 +105,30 @@ adc_unlock:
return ret;
}
+static int mt6370_adc_get_ibus_scale(struct mt6370_adc_data *priv)
+{
+ switch (priv->vid) {
+ case MT6370_VID_RT5081:
+ case MT6370_VID_RT5081A:
+ case MT6370_VID_MT6370:
+ return 3350;
+ default:
+ return 3875;
+ }
+}
+
+static int mt6370_adc_get_ibat_scale(struct mt6370_adc_data *priv)
+{
+ switch (priv->vid) {
+ case MT6370_VID_RT5081:
+ case MT6370_VID_RT5081A:
+ case MT6370_VID_MT6370:
+ return 2680;
+ default:
+ return 3870;
+ }
+}
+
static int mt6370_adc_read_scale(struct mt6370_adc_data *priv,
int chan, int *val1, int *val2)
{
@@ -123,7 +154,7 @@ static int mt6370_adc_read_scale(struct mt6370_adc_data *priv,
case MT6370_AICR_250_mA:
case MT6370_AICR_300_mA:
case MT6370_AICR_350_mA:
- *val1 = 3350;
+ *val1 = mt6370_adc_get_ibus_scale(priv);
break;
default:
*val1 = 5000;
@@ -150,7 +181,7 @@ static int mt6370_adc_read_scale(struct mt6370_adc_data *priv,
case MT6370_ICHG_600_mA:
case MT6370_ICHG_700_mA:
case MT6370_ICHG_800_mA:
- *val1 = 2680;
+ *val1 = mt6370_adc_get_ibat_scale(priv);
break;
default:
*val1 = 5000;
@@ -251,6 +282,20 @@ static const struct iio_chan_spec mt6370_adc_channels[] = {
MT6370_ADC_CHAN(TEMP_JC, IIO_TEMP, 12, BIT(IIO_CHAN_INFO_OFFSET)),
};
+static int mt6370_get_vendor_info(struct mt6370_adc_data *priv)
+{
+ unsigned int dev_info;
+ int ret;
+
+ ret = regmap_read(priv->regmap, MT6370_REG_DEV_INFO, &dev_info);
+ if (ret)
+ return ret;
+
+ priv->vid = FIELD_GET(MT6370_VENID_MASK, dev_info);
+
+ return 0;
+}
+
static int mt6370_adc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -272,6 +317,10 @@ static int mt6370_adc_probe(struct platform_device *pdev)
priv->regmap = regmap;
mutex_init(&priv->adc_lock);
+ ret = mt6370_get_vendor_info(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get vid\n");
+
ret = regmap_write(priv->regmap, MT6370_REG_CHG_ADC, 0);
if (ret)
return dev_err_probe(dev, ret, "Failed to reset ADC\n");
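The mt6370 ADC now derives its IBUS and IBAT current scales from the chip's vendor ID, read once at probe from the DEV_INFO register: RT5081, RT5081A and MT6370 keep the original 3350/2680 factors, while any other ID gets 3875/3870. The lookup reduces to:

priv->vid = FIELD_GET(MT6370_VENID_MASK, dev_info);     /* bits 7:4 of DEV_INFO */

*val1 = mt6370_adc_get_ibus_scale(priv);                /* 3350 or 3875 by vid */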
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
index bca79a93cbe4..a50f39143d3e 100644
--- a/drivers/iio/adc/mxs-lradc-adc.c
+++ b/drivers/iio/adc/mxs-lradc-adc.c
@@ -757,13 +757,13 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
ret = mxs_lradc_adc_trigger_init(iio);
if (ret)
- goto err_trig;
+ return ret;
ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time,
&mxs_lradc_adc_trigger_handler,
&mxs_lradc_adc_buffer_ops);
if (ret)
- return ret;
+ goto err_trig;
adc->vref_mv = mxs_lradc_adc_vref_mv[lradc->soc];
@@ -801,9 +801,9 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
err_dev:
mxs_lradc_adc_hw_stop(adc);
- mxs_lradc_adc_trigger_remove(iio);
-err_trig:
iio_triggered_buffer_cleanup(iio);
+err_trig:
+ mxs_lradc_adc_trigger_remove(iio);
return ret;
}
@@ -814,8 +814,8 @@ static int mxs_lradc_adc_remove(struct platform_device *pdev)
iio_device_unregister(iio);
mxs_lradc_adc_hw_stop(adc);
- mxs_lradc_adc_trigger_remove(iio);
iio_triggered_buffer_cleanup(iio);
+ mxs_lradc_adc_trigger_remove(iio);
return 0;
}
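Both the probe error path and remove are reordered so that teardown is the exact reverse of setup: the triggered buffer is set up after the trigger, so it must be cleaned up first. A self-contained sketch of the goto-ladder shape (the init_*/cleanup_* names are placeholders):

ret = init_trigger();
if (ret)
        return ret;             /* nothing to unwind yet */

ret = init_buffer();
if (ret)
        goto err_trigger;       /* only the trigger exists so far */

ret = register_device();
if (ret)
        goto err_buffer;
return 0;

err_buffer:
        cleanup_buffer();       /* reverse of setup order */
err_trigger:
        cleanup_trigger();
        return ret;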
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index c1c439215aeb..7dfc9c927a23 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -547,7 +547,7 @@ static int palmas_gpadc_read_raw(struct iio_dev *indio_dev,
int adc_chan = chan->channel;
int ret = 0;
- if (adc_chan > PALMAS_ADC_CH_MAX)
+ if (adc_chan >= PALMAS_ADC_CH_MAX)
return -EINVAL;
mutex_lock(&adc->lock);
@@ -595,7 +595,7 @@ static int palmas_gpadc_read_event_config(struct iio_dev *indio_dev,
int adc_chan = chan->channel;
int ret = 0;
- if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
+ if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
return -EINVAL;
mutex_lock(&adc->lock);
@@ -684,7 +684,7 @@ static int palmas_gpadc_write_event_config(struct iio_dev *indio_dev,
int adc_chan = chan->channel;
int ret;
- if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
+ if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
return -EINVAL;
mutex_lock(&adc->lock);
@@ -710,7 +710,7 @@ static int palmas_gpadc_read_event_value(struct iio_dev *indio_dev,
int adc_chan = chan->channel;
int ret;
- if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
+ if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
return -EINVAL;
mutex_lock(&adc->lock);
@@ -744,7 +744,7 @@ static int palmas_gpadc_write_event_value(struct iio_dev *indio_dev,
int old;
int ret;
- if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
+ if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
return -EINVAL;
mutex_lock(&adc->lock);
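These are off-by-one fixes: taking the change at face value, PALMAS_ADC_CH_MAX is the channel count, so valid indices run 0 to PALMAS_ADC_CH_MAX - 1 and the bound check must also reject adc_chan == PALMAS_ADC_CH_MAX:

if (adc_chan >= PALMAS_ADC_CH_MAX)      /* '>' let index == CH_MAX through */
        return -EINVAL;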
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 1aadb2ad2cab..bd7e2408bf28 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -2006,16 +2006,15 @@ static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm
* to get the *real* number of channels.
*/
ret = device_property_count_u32(dev, "st,adc-diff-channels");
- if (ret < 0)
- return ret;
-
- ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32));
- if (ret > adc_info->max_channels) {
- dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n");
- return -EINVAL;
- } else if (ret > 0) {
- adc->num_diff = ret;
- num_channels += ret;
+ if (ret > 0) {
+ ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32));
+ if (ret > adc_info->max_channels) {
+ dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n");
+ return -EINVAL;
+ } else if (ret > 0) {
+ adc->num_diff = ret;
+ num_channels += ret;
+ }
}
/* Optional sample time is provided either for each, or all channels */
@@ -2037,6 +2036,7 @@ static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev,
struct stm32_adc_diff_channel diff[STM32_ADC_CH_MAX];
struct device *dev = &indio_dev->dev;
u32 num_diff = adc->num_diff;
+ int num_se = nchans - num_diff;
int size = num_diff * sizeof(*diff) / sizeof(u32);
int scan_index = 0, ret, i, c;
u32 smp = 0, smps[STM32_ADC_CH_MAX], chans[STM32_ADC_CH_MAX];
@@ -2063,29 +2063,32 @@ static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev,
scan_index++;
}
}
-
- ret = device_property_read_u32_array(dev, "st,adc-channels", chans,
- nchans);
- if (ret)
- return ret;
-
- for (c = 0; c < nchans; c++) {
- if (chans[c] >= adc_info->max_channels) {
- dev_err(&indio_dev->dev, "Invalid channel %d\n",
- chans[c]);
- return -EINVAL;
+ if (num_se > 0) {
+ ret = device_property_read_u32_array(dev, "st,adc-channels", chans, num_se);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Failed to get st,adc-channels %d\n", ret);
+ return ret;
}
- /* Channel can't be configured both as single-ended & diff */
- for (i = 0; i < num_diff; i++) {
- if (chans[c] == diff[i].vinp) {
- dev_err(&indio_dev->dev, "channel %d misconfigured\n", chans[c]);
+ for (c = 0; c < num_se; c++) {
+ if (chans[c] >= adc_info->max_channels) {
+ dev_err(&indio_dev->dev, "Invalid channel %d\n",
+ chans[c]);
return -EINVAL;
}
+
+ /* Channel can't be configured both as single-ended & diff */
+ for (i = 0; i < num_diff; i++) {
+ if (chans[c] == diff[i].vinp) {
+ dev_err(&indio_dev->dev, "channel %d misconfigured\n",
+ chans[c]);
+ return -EINVAL;
+ }
+ }
+ stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
+ chans[c], 0, scan_index, false);
+ scan_index++;
}
- stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
- chans[c], 0, scan_index, false);
- scan_index++;
}
if (adc->nsmps > 0) {
@@ -2306,7 +2309,7 @@ static int stm32_adc_chan_fw_init(struct iio_dev *indio_dev, bool timestamping)
if (legacy)
ret = stm32_adc_legacy_chan_init(indio_dev, adc, channels,
- num_channels);
+ timestamping ? num_channels - 1 : num_channels);
else
ret = stm32_adc_generic_chan_init(indio_dev, adc, channels);
if (ret < 0)
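Three related fixes to the legacy stm32 channel parsing: a missing st,adc-diff-channels property is no longer a hard error, st,adc-channels is read with the single-ended count rather than the total (so mixed configurations parse), and the timestamp slot is no longer counted as a hardware channel. The core of it:

num_se = nchans - num_diff;     /* entries expected in st,adc-channels */
if (num_se > 0)
        ret = device_property_read_u32_array(dev, "st,adc-channels",
                                             chans, num_se);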
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
index 07e9f6ae16a8..e3366cf5eb31 100644
--- a/drivers/iio/addac/ad74413r.c
+++ b/drivers/iio/addac/ad74413r.c
@@ -1007,7 +1007,7 @@ static int ad74413r_read_raw(struct iio_dev *indio_dev,
ret = ad74413r_get_single_adc_result(indio_dev, chan->channel,
val);
- if (ret)
+ if (ret < 0)
return ret;
ad74413r_adc_to_resistance_result(*val, val);
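The if (ret) to if (ret < 0) change suggests ad74413r_get_single_adc_result() follows the common IIO convention of returning a positive IIO_VAL_* code on success, in which case treating any non-zero return as an error discarded valid resistance readings:

ret = ad74413r_get_single_adc_result(indio_dev, chan->channel, val);
if (ret < 0)            /* a positive IIO_VAL_* return is success */
        return ret;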
diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c
index 8bb68975b259..7653261d2dc2 100644
--- a/drivers/iio/industrialio-gts-helper.c
+++ b/drivers/iio/industrialio-gts-helper.c
@@ -337,6 +337,17 @@ free_gains:
return ret;
}
+static void iio_gts_us_to_int_micro(int *time_us, int *int_micro_times,
+ int num_times)
+{
+ int i;
+
+ for (i = 0; i < num_times; i++) {
+ int_micro_times[i * 2] = time_us[i] / 1000000;
+ int_micro_times[i * 2 + 1] = time_us[i] % 1000000;
+ }
+}
+
/**
* iio_gts_build_avail_time_table - build table of available integration times
* @gts: Gain time scale descriptor
@@ -351,7 +362,7 @@ free_gains:
*/
static int iio_gts_build_avail_time_table(struct iio_gts *gts)
{
- int *times, i, j, idx = 0;
+ int *times, i, j, idx = 0, *int_micro_times;
if (!gts->num_itime)
return 0;
@@ -378,13 +389,24 @@ static int iio_gts_build_avail_time_table(struct iio_gts *gts)
}
}
}
- gts->avail_time_tables = times;
- /*
- * This is just to survive a unlikely corner-case where times in the
- * given time table were not unique. Else we could just trust the
- * gts->num_itime.
- */
- gts->num_avail_time_tables = idx;
+
+ /* create a list of times formatted as a list of IIO_VAL_INT_PLUS_MICRO values */
+ int_micro_times = kcalloc(idx, sizeof(int) * 2, GFP_KERNEL);
+ if (int_micro_times) {
+ /*
+ * This is just to survive an unlikely corner case where the
+ * times in the given time table were not unique. Otherwise we
+ * could simply trust gts->num_itime.
+ */
+ gts->num_avail_time_tables = idx;
+ iio_gts_us_to_int_micro(times, int_micro_times, idx);
+ }
+
+ gts->avail_time_tables = int_micro_times;
+ kfree(times);
+
+ if (!int_micro_times)
+ return -ENOMEM;
return 0;
}
@@ -683,8 +705,8 @@ int iio_gts_avail_times(struct iio_gts *gts, const int **vals, int *type,
return -EINVAL;
*vals = gts->avail_time_tables;
- *type = IIO_VAL_INT;
- *length = gts->num_avail_time_tables;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *length = gts->num_avail_time_tables * 2;
return IIO_AVAIL_LIST;
}
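/*
 * Illustrative sketch, not part of the patch: IIO_VAL_INT_PLUS_MICRO stores
 * each value as two ints, whole units followed by microseconds, which is
 * why the available-times table above doubles in length. A hypothetical
 * helper showing the split done by iio_gts_us_to_int_micro():
 */
static void us_to_int_micro_example(int time_us, int *val, int *val2)
{
	*val = time_us / 1000000;	/* e.g. 400000 us -> 0 */
	*val2 = time_us % 1000000;	/* -> 400000 */
}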
diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c
index e486dcf35eba..25c9b79574a5 100644
--- a/drivers/iio/light/rohm-bu27034.c
+++ b/drivers/iio/light/rohm-bu27034.c
@@ -1167,11 +1167,12 @@ static int bu27034_read_raw(struct iio_dev *idev,
switch (mask) {
case IIO_CHAN_INFO_INT_TIME:
- *val = bu27034_get_int_time(data);
- if (*val < 0)
- return *val;
+ *val = 0;
+ *val2 = bu27034_get_int_time(data);
+ if (*val2 < 0)
+ return *val2;
- return IIO_VAL_INT;
+ return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SCALE:
return bu27034_get_scale(data, chan->channel, val, val2);
@@ -1229,7 +1230,10 @@ static int bu27034_write_raw(struct iio_dev *idev,
ret = bu27034_set_scale(data, chan->channel, val, val2);
break;
case IIO_CHAN_INFO_INT_TIME:
- ret = bu27034_try_set_int_time(data, val);
+ if (!val)
+ ret = bu27034_try_set_int_time(data, val2);
+ else
+ ret = -EINVAL;
break;
default:
ret = -EINVAL;
diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
index 14e29330e972..94f5d611e98c 100644
--- a/drivers/iio/light/vcnl4035.c
+++ b/drivers/iio/light/vcnl4035.c
@@ -8,6 +8,7 @@
* TODO: Proximity
*/
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -42,6 +43,7 @@
#define VCNL4035_ALS_PERS_MASK GENMASK(3, 2)
#define VCNL4035_INT_ALS_IF_H_MASK BIT(12)
#define VCNL4035_INT_ALS_IF_L_MASK BIT(13)
+#define VCNL4035_DEV_ID_MASK GENMASK(7, 0)
/* Default values */
#define VCNL4035_MODE_ALS_ENABLE BIT(0)
@@ -413,6 +415,7 @@ static int vcnl4035_init(struct vcnl4035_data *data)
return ret;
}
+ id = FIELD_GET(VCNL4035_DEV_ID_MASK, id);
if (id != VCNL4035_DEV_ID_VAL) {
dev_err(&data->client->dev, "Wrong id, got %x, expected %x\n",
id, VCNL4035_DEV_ID_VAL);
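/*
 * Illustrative sketch, not part of the patch: FIELD_GET() masks and
 * right-shifts the bits selected by a GENMASK(). With VCNL4035_DEV_ID_MASK
 * set to GENMASK(7, 0) it keeps only the low byte of the 16-bit ID
 * register, so the comparison above no longer trips over extra data in
 * the high byte. A hypothetical standalone form:
 */
#include <linux/bitfield.h>
#include <linux/bits.h>

static u8 vcnl4035_id_example(u16 reg)
{
	return FIELD_GET(GENMASK(7, 0), reg);	/* 0xab80 -> 0x80 */
}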
diff --git a/drivers/iio/magnetometer/tmag5273.c b/drivers/iio/magnetometer/tmag5273.c
index 28bb7efe8df8..e155a75b3cd2 100644
--- a/drivers/iio/magnetometer/tmag5273.c
+++ b/drivers/iio/magnetometer/tmag5273.c
@@ -296,12 +296,13 @@ static int tmag5273_read_raw(struct iio_dev *indio_dev,
return ret;
ret = tmag5273_get_measure(data, &t, &x, &y, &z, &angle, &magnitude);
- if (ret)
- return ret;
pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
+ if (ret)
+ return ret;
+
switch (chan->address) {
case TEMPERATURE:
*val = t;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 6de37c5d7d27..b96a15d4a202 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -755,14 +755,13 @@ static int create_workqueues(struct hfi1_devdata *dd)
}
if (!ppd->link_wq) {
/*
- * Make the link workqueue single-threaded to enforce
+ * Make the link workqueue ordered to enforce
* serialization.
*/
ppd->link_wq =
- alloc_workqueue(
+ alloc_ordered_workqueue(
"hfi_link_%d_%d",
- WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND,
- 1, /* max_active */
+ WQ_SYSFS | WQ_MEM_RECLAIM,
dd->unit, pidx);
if (!ppd->link_wq)
goto wq_error;
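/*
 * Illustrative sketch, not part of the patch: alloc_ordered_workqueue() is
 * the canonical spelling of an unbound workqueue with max_active == 1, so
 * the conversion above keeps the one-item-at-a-time serialization while
 * dropping the magic constant. Hypothetical helper:
 */
#include <linux/workqueue.h>

static struct workqueue_struct *make_link_wq(int unit, int pidx)
{
	/* equivalent to alloc_workqueue(name, flags | WQ_UNBOUND, 1, ...) */
	return alloc_ordered_workqueue("hfi_link_%d_%d",
				       WQ_SYSFS | WQ_MEM_RECLAIM, unit, pidx);
}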
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index dea605b7f683..dbda318ef469 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1356,7 +1356,7 @@ static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
if (cleanup_err)
rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
- kfree_rcu(mr);
+ kfree(mr);
return 0;
err_out:
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 735f90b74ee5..3bdbd34314b3 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -168,7 +168,7 @@ config INPUT_EVBUG
config INPUT_KUNIT_TEST
tristate "KUnit tests for Input" if !KUNIT_ALL_TESTS
- depends on INPUT && KUNIT=y
+ depends on INPUT && KUNIT
default KUNIT_ALL_TESTS
help
Say Y here if you want to build the KUnit tests for the input
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 37e876d45eb9..8c5fdb0f858a 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -190,6 +190,7 @@ static int input_handle_abs_event(struct input_dev *dev,
unsigned int code, int *pval)
{
struct input_mt *mt = dev->mt;
+ bool is_new_slot = false;
bool is_mt_event;
int *pold;
@@ -210,6 +211,7 @@ static int input_handle_abs_event(struct input_dev *dev,
pold = &dev->absinfo[code].value;
} else if (mt) {
pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST];
+ is_new_slot = mt->slot != dev->absinfo[ABS_MT_SLOT].value;
} else {
/*
* Bypass filtering for multi-touch events when
@@ -228,8 +230,8 @@ static int input_handle_abs_event(struct input_dev *dev,
}
/* Flush pending "slot" event */
- if (is_mt_event && mt && mt->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
- input_abs_set_val(dev, ABS_MT_SLOT, mt->slot);
+ if (is_new_slot) {
+ dev->absinfo[ABS_MT_SLOT].value = mt->slot;
return INPUT_PASS_TO_HANDLERS | INPUT_SLOT;
}
@@ -703,7 +705,7 @@ void input_close_device(struct input_handle *handle)
__input_release_device(handle);
- if (!dev->inhibited && !--dev->users) {
+ if (!--dev->users && !dev->inhibited) {
if (dev->poller)
input_dev_poller_stop(dev->poller);
if (dev->close)
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index 8a9ebfc04a2d..f5e96b36acda 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -149,7 +149,7 @@
/* Control 3 Register */
#define DRV260X_LRA_OPEN_LOOP (1 << 0)
-#define DRV260X_ANANLOG_IN (1 << 1)
+#define DRV260X_ANALOG_IN (1 << 1)
#define DRV260X_LRA_DRV_MODE (1 << 2)
#define DRV260X_RTP_UNSIGNED_DATA (1 << 3)
#define DRV260X_SUPPLY_COMP_DIS (1 << 4)
@@ -186,51 +186,13 @@ struct drv260x_data {
struct work_struct work;
struct gpio_desc *enable_gpio;
struct regulator *regulator;
- u32 magnitude;
+ u8 magnitude;
u32 mode;
u32 library;
int rated_voltage;
int overdrive_voltage;
};
-static const struct reg_default drv260x_reg_defs[] = {
- { DRV260X_STATUS, 0xe0 },
- { DRV260X_MODE, 0x40 },
- { DRV260X_RT_PB_IN, 0x00 },
- { DRV260X_LIB_SEL, 0x00 },
- { DRV260X_WV_SEQ_1, 0x01 },
- { DRV260X_WV_SEQ_2, 0x00 },
- { DRV260X_WV_SEQ_3, 0x00 },
- { DRV260X_WV_SEQ_4, 0x00 },
- { DRV260X_WV_SEQ_5, 0x00 },
- { DRV260X_WV_SEQ_6, 0x00 },
- { DRV260X_WV_SEQ_7, 0x00 },
- { DRV260X_WV_SEQ_8, 0x00 },
- { DRV260X_GO, 0x00 },
- { DRV260X_OVERDRIVE_OFF, 0x00 },
- { DRV260X_SUSTAIN_P_OFF, 0x00 },
- { DRV260X_SUSTAIN_N_OFF, 0x00 },
- { DRV260X_BRAKE_OFF, 0x00 },
- { DRV260X_A_TO_V_CTRL, 0x05 },
- { DRV260X_A_TO_V_MIN_INPUT, 0x19 },
- { DRV260X_A_TO_V_MAX_INPUT, 0xff },
- { DRV260X_A_TO_V_MIN_OUT, 0x19 },
- { DRV260X_A_TO_V_MAX_OUT, 0xff },
- { DRV260X_RATED_VOLT, 0x3e },
- { DRV260X_OD_CLAMP_VOLT, 0x8c },
- { DRV260X_CAL_COMP, 0x0c },
- { DRV260X_CAL_BACK_EMF, 0x6c },
- { DRV260X_FEEDBACK_CTRL, 0x36 },
- { DRV260X_CTRL1, 0x93 },
- { DRV260X_CTRL2, 0xfa },
- { DRV260X_CTRL3, 0xa0 },
- { DRV260X_CTRL4, 0x20 },
- { DRV260X_CTRL5, 0x80 },
- { DRV260X_LRA_LOOP_PERIOD, 0x33 },
- { DRV260X_VBAT_MON, 0x00 },
- { DRV260X_LRA_RES_PERIOD, 0x00 },
-};
-
#define DRV260X_DEF_RATED_VOLT 0x90
#define DRV260X_DEF_OD_CLAMP_VOLT 0x90
@@ -275,10 +237,11 @@ static int drv260x_haptics_play(struct input_dev *input, void *data,
haptics->mode = DRV260X_LRA_NO_CAL_MODE;
+ /* Scale u16 magnitude into u8 register value */
if (effect->u.rumble.strong_magnitude > 0)
- haptics->magnitude = effect->u.rumble.strong_magnitude;
+ haptics->magnitude = effect->u.rumble.strong_magnitude >> 8;
else if (effect->u.rumble.weak_magnitude > 0)
- haptics->magnitude = effect->u.rumble.weak_magnitude;
+ haptics->magnitude = effect->u.rumble.weak_magnitude >> 8;
else
haptics->magnitude = 0;
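/*
 * Illustrative sketch, not part of the patch: ff_effect rumble magnitudes
 * are u16 (0..0xffff) while the DRV260X RTP register is 8 bits wide, so
 * the patch keeps the most significant byte. A hypothetical one-liner
 * showing the scaling:
 */
static u8 rumble_to_reg(u16 magnitude)
{
	return magnitude >> 8;	/* 0xffff -> 0xff, 0x8000 -> 0x80 */
}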
@@ -304,7 +267,7 @@ static void drv260x_close(struct input_dev *input)
static const struct reg_sequence drv260x_lra_cal_regs[] = {
{ DRV260X_MODE, DRV260X_AUTO_CAL },
- { DRV260X_CTRL3, DRV260X_NG_THRESH_2 },
+ { DRV260X_CTRL3, DRV260X_NG_THRESH_2 | DRV260X_RTP_UNSIGNED_DATA },
{ DRV260X_FEEDBACK_CTRL, DRV260X_FB_REG_LRA_MODE |
DRV260X_BRAKE_FACTOR_4X | DRV260X_LOOP_GAIN_HIGH },
};
@@ -322,7 +285,7 @@ static const struct reg_sequence drv260x_lra_init_regs[] = {
DRV260X_BEMF_GAIN_3 },
{ DRV260X_CTRL1, DRV260X_STARTUP_BOOST },
{ DRV260X_CTRL2, DRV260X_SAMP_TIME_250 },
- { DRV260X_CTRL3, DRV260X_NG_THRESH_2 | DRV260X_ANANLOG_IN },
+ { DRV260X_CTRL3, DRV260X_NG_THRESH_2 | DRV260X_RTP_UNSIGNED_DATA | DRV260X_ANALOG_IN },
{ DRV260X_CTRL4, DRV260X_AUTOCAL_TIME_500MS },
};
@@ -337,7 +300,7 @@ static const struct reg_sequence drv260x_erm_cal_regs[] = {
{ DRV260X_CTRL1, DRV260X_STARTUP_BOOST },
{ DRV260X_CTRL2, DRV260X_SAMP_TIME_250 | DRV260X_BLANK_TIME_75 |
DRV260X_IDISS_TIME_75 },
- { DRV260X_CTRL3, DRV260X_NG_THRESH_2 | DRV260X_ERM_OPEN_LOOP },
+ { DRV260X_CTRL3, DRV260X_NG_THRESH_2 | DRV260X_RTP_UNSIGNED_DATA },
{ DRV260X_CTRL4, DRV260X_AUTOCAL_TIME_500MS },
};
@@ -435,6 +398,7 @@ static int drv260x_init(struct drv260x_data *haptics)
}
do {
+ usleep_range(15000, 15500);
error = regmap_read(haptics->regmap, DRV260X_GO, &cal_buf);
if (error) {
dev_err(&haptics->client->dev,
@@ -452,8 +416,6 @@ static const struct regmap_config drv260x_regmap_config = {
.val_bits = 8,
.max_register = DRV260X_MAX_REG,
- .reg_defaults = drv260x_reg_defs,
- .num_reg_defaults = ARRAY_SIZE(drv260x_reg_defs),
.cache_type = REGCACHE_NONE,
};
diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c
index d0e58a7cdfa3..2ba035299db8 100644
--- a/drivers/input/misc/pwm-vibra.c
+++ b/drivers/input/misc/pwm-vibra.c
@@ -11,6 +11,7 @@
* Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
*/
+#include <linux/gpio/consumer.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -23,6 +24,7 @@
struct pwm_vibrator {
struct input_dev *input;
+ struct gpio_desc *enable_gpio;
struct pwm_device *pwm;
struct pwm_device *pwm_dir;
struct regulator *vcc;
@@ -42,19 +44,21 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
if (!vibrator->vcc_on) {
err = regulator_enable(vibrator->vcc);
if (err) {
- dev_err(pdev, "failed to enable regulator: %d", err);
+ dev_err(pdev, "failed to enable regulator: %d\n", err);
return err;
}
vibrator->vcc_on = true;
}
+ gpiod_set_value_cansleep(vibrator->enable_gpio, 1);
+
pwm_get_state(vibrator->pwm, &state);
pwm_set_relative_duty_cycle(&state, vibrator->level, 0xffff);
state.enabled = true;
err = pwm_apply_state(vibrator->pwm, &state);
if (err) {
- dev_err(pdev, "failed to apply pwm state: %d", err);
+ dev_err(pdev, "failed to apply pwm state: %d\n", err);
return err;
}
@@ -65,7 +69,7 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
err = pwm_apply_state(vibrator->pwm_dir, &state);
if (err) {
- dev_err(pdev, "failed to apply dir-pwm state: %d", err);
+ dev_err(pdev, "failed to apply dir-pwm state: %d\n", err);
pwm_disable(vibrator->pwm);
return err;
}
@@ -80,6 +84,8 @@ static void pwm_vibrator_stop(struct pwm_vibrator *vibrator)
pwm_disable(vibrator->pwm_dir);
pwm_disable(vibrator->pwm);
+ gpiod_set_value_cansleep(vibrator->enable_gpio, 0);
+
if (vibrator->vcc_on) {
regulator_disable(vibrator->vcc);
vibrator->vcc_on = false;
@@ -137,7 +143,17 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
err = PTR_ERR_OR_ZERO(vibrator->vcc);
if (err) {
if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to request regulator: %d",
+ dev_err(&pdev->dev, "Failed to request regulator: %d\n",
+ err);
+ return err;
+ }
+
+ vibrator->enable_gpio = devm_gpiod_get_optional(&pdev->dev, "enable",
+ GPIOD_OUT_LOW);
+ err = PTR_ERR_OR_ZERO(vibrator->enable_gpio);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to request enable gpio: %d\n",
err);
return err;
}
@@ -146,7 +162,7 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
err = PTR_ERR_OR_ZERO(vibrator->pwm);
if (err) {
if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to request main pwm: %d",
+ dev_err(&pdev->dev, "Failed to request main pwm: %d\n",
err);
return err;
}
@@ -158,7 +174,7 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
state.enabled = false;
err = pwm_apply_state(vibrator->pwm, &state);
if (err) {
- dev_err(&pdev->dev, "failed to apply initial PWM state: %d",
+ dev_err(&pdev->dev, "failed to apply initial PWM state: %d\n",
err);
return err;
}
@@ -172,7 +188,7 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
state.enabled = false;
err = pwm_apply_state(vibrator->pwm_dir, &state);
if (err) {
- dev_err(&pdev->dev, "failed to apply initial PWM state: %d",
+ dev_err(&pdev->dev, "failed to apply initial PWM state: %d\n",
err);
return err;
}
@@ -189,7 +205,7 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
break;
default:
- dev_err(&pdev->dev, "Failed to request direction pwm: %d", err);
+ dev_err(&pdev->dev, "Failed to request direction pwm: %d\n", err);
fallthrough;
case -EPROBE_DEFER:
@@ -207,13 +223,13 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
err = input_ff_create_memless(vibrator->input, NULL,
pwm_vibrator_play_effect);
if (err) {
- dev_err(&pdev->dev, "Couldn't create FF dev: %d", err);
+ dev_err(&pdev->dev, "Couldn't create FF dev: %d\n", err);
return err;
}
err = input_register_device(vibrator->input);
if (err) {
- dev_err(&pdev->dev, "Couldn't register input dev: %d", err);
+ dev_err(&pdev->dev, "Couldn't register input dev: %d\n", err);
return err;
}
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index f2593133e524..d98212d55108 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -33,6 +33,7 @@
#define UINPUT_NAME "uinput"
#define UINPUT_BUFFER_SIZE 16
#define UINPUT_NUM_REQUESTS 16
+#define UINPUT_TIMESTAMP_ALLOWED_OFFSET_SECS 10
enum uinput_state { UIST_NEW_DEVICE, UIST_SETUP_COMPLETE, UIST_CREATED };
@@ -569,11 +570,40 @@ static int uinput_setup_device_legacy(struct uinput_device *udev,
return retval;
}
+/*
+ * Returns true if the given timestamp is valid (i.e. all of the following
+ * conditions are satisfied), false otherwise:
+ * 1) the timestamp is positive
+ * 2) it is within the allowed offset before the current time
+ * 3) it is not in the future
+ */
+static bool is_valid_timestamp(const ktime_t timestamp)
+{
+ ktime_t zero_time;
+ ktime_t current_time;
+ ktime_t min_time;
+ ktime_t offset;
+
+ zero_time = ktime_set(0, 0);
+ if (ktime_compare(zero_time, timestamp) >= 0)
+ return false;
+
+ current_time = ktime_get();
+ offset = ktime_set(UINPUT_TIMESTAMP_ALLOWED_OFFSET_SECS, 0);
+ min_time = ktime_sub(current_time, offset);
+
+ if (ktime_after(min_time, timestamp) || ktime_after(timestamp, current_time))
+ return false;
+
+ return true;
+}
+
static ssize_t uinput_inject_events(struct uinput_device *udev,
const char __user *buffer, size_t count)
{
struct input_event ev;
size_t bytes = 0;
+ ktime_t timestamp;
if (count != 0 && count < input_event_size())
return -EINVAL;
@@ -588,6 +618,10 @@ static ssize_t uinput_inject_events(struct uinput_device *udev,
if (input_event_from_user(buffer + bytes, &ev))
return -EFAULT;
+ timestamp = ktime_set(ev.input_event_sec, ev.input_event_usec * NSEC_PER_USEC);
+ if (is_valid_timestamp(timestamp))
+ input_set_timestamp(udev->dev, timestamp);
+
input_event(udev->dev, ev.type, ev.code, ev.value);
bytes += input_event_size();
cond_resched();
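/*
 * Illustrative sketch, not part of the patch: the injected event carries a
 * split timestamp (seconds plus microseconds); ktime_set() rebuilds it as
 * nanoseconds, and only values from the last 10 s are accepted by
 * is_valid_timestamp() above. Hypothetical helper mirroring the conversion:
 */
#include <linux/ktime.h>

static ktime_t uinput_event_time(long sec, long usec)
{
	return ktime_set(sec, usec * NSEC_PER_USEC);
}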
diff --git a/drivers/input/tests/input_test.c b/drivers/input/tests/input_test.c
index e5a6c1ad2167..0540225f0288 100644
--- a/drivers/input/tests/input_test.c
+++ b/drivers/input/tests/input_test.c
@@ -43,8 +43,8 @@ static void input_test_exit(struct kunit *test)
{
struct input_dev *input_dev = test->priv;
- input_unregister_device(input_dev);
- input_free_device(input_dev);
+ if (input_dev)
+ input_unregister_device(input_dev);
}
static void input_test_poll(struct input_dev *input) { }
@@ -87,7 +87,7 @@ static void input_test_timestamp(struct kunit *test)
static void input_test_match_device_id(struct kunit *test)
{
struct input_dev *input_dev = test->priv;
- struct input_device_id id;
+ struct input_device_id id = { 0 };
/*
* Must match when the input device bus, vendor, product, version
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 996bf434e1cb..edea1823fd36 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -55,6 +55,7 @@
#define MXT_TOUCH_KEYARRAY_T15 15
#define MXT_TOUCH_PROXIMITY_T23 23
#define MXT_TOUCH_PROXKEY_T52 52
+#define MXT_TOUCH_PTC_KEYS_T97 97
#define MXT_PROCI_GRIPFACE_T20 20
#define MXT_PROCG_NOISE_T22 22
#define MXT_PROCI_ONETOUCH_T24 24
@@ -326,9 +327,13 @@ struct mxt_data {
u16 T71_address;
u8 T9_reportid_min;
u8 T9_reportid_max;
+ u8 T15_reportid_min;
+ u8 T15_reportid_max;
u16 T18_address;
u8 T19_reportid;
u16 T44_address;
+ u8 T97_reportid_min;
+ u8 T97_reportid_max;
u8 T100_reportid_min;
u8 T100_reportid_max;
@@ -344,6 +349,9 @@ struct mxt_data {
u32 *t19_keymap;
unsigned int t19_num_keys;
+ u32 *t15_keymap;
+ unsigned int t15_num_keys;
+
enum mxt_suspend_mode suspend_mode;
u32 wakeup_method;
@@ -375,6 +383,7 @@ static bool mxt_object_readable(unsigned int type)
case MXT_TOUCH_KEYARRAY_T15:
case MXT_TOUCH_PROXIMITY_T23:
case MXT_TOUCH_PROXKEY_T52:
+ case MXT_TOUCH_PTC_KEYS_T97:
case MXT_TOUCH_MULTITOUCHSCREEN_T100:
case MXT_PROCI_GRIPFACE_T20:
case MXT_PROCG_NOISE_T22:
@@ -891,6 +900,24 @@ static void mxt_proc_t9_message(struct mxt_data *data, u8 *message)
data->update_input = true;
}
+static void mxt_proc_t15_messages(struct mxt_data *data, u8 *message)
+{
+ struct input_dev *input_dev = data->input_dev;
+ unsigned long keystates = get_unaligned_le32(&message[2]);
+ int key;
+
+ for (key = 0; key < data->t15_num_keys; key++)
+ input_report_key(input_dev, data->t15_keymap[key],
+ keystates & BIT(key));
+
+ data->update_input = true;
+}
+
+static void mxt_proc_t97_messages(struct mxt_data *data, u8 *message)
+{
+ mxt_proc_t15_messages(data, message);
+}
+
static void mxt_proc_t100_message(struct mxt_data *data, u8 *message)
{
struct device *dev = &data->client->dev;
@@ -1017,6 +1044,12 @@ static int mxt_proc_message(struct mxt_data *data, u8 *message)
} else if (report_id >= data->T9_reportid_min &&
report_id <= data->T9_reportid_max) {
mxt_proc_t9_message(data, message);
+ } else if (report_id >= data->T15_reportid_min &&
+ report_id <= data->T15_reportid_max) {
+ mxt_proc_t15_messages(data, message);
+ } else if (report_id >= data->T97_reportid_min &&
+ report_id <= data->T97_reportid_max) {
+ mxt_proc_t97_messages(data, message);
} else if (report_id >= data->T100_reportid_min &&
report_id <= data->T100_reportid_max) {
mxt_proc_t100_message(data, message);
@@ -1689,9 +1722,13 @@ static void mxt_free_object_table(struct mxt_data *data)
data->T71_address = 0;
data->T9_reportid_min = 0;
data->T9_reportid_max = 0;
+ data->T15_reportid_min = 0;
+ data->T15_reportid_max = 0;
data->T18_address = 0;
data->T19_reportid = 0;
data->T44_address = 0;
+ data->T97_reportid_min = 0;
+ data->T97_reportid_max = 0;
data->T100_reportid_min = 0;
data->T100_reportid_max = 0;
data->max_reportid = 0;
@@ -1764,6 +1801,10 @@ static int mxt_parse_object_table(struct mxt_data *data,
object->num_report_ids - 1;
data->num_touchids = object->num_report_ids;
break;
+ case MXT_TOUCH_KEYARRAY_T15:
+ data->T15_reportid_min = min_id;
+ data->T15_reportid_max = max_id;
+ break;
case MXT_SPT_COMMSCONFIG_T18:
data->T18_address = object->start_address;
break;
@@ -1773,6 +1814,10 @@ static int mxt_parse_object_table(struct mxt_data *data,
case MXT_SPT_GPIOPWM_T19:
data->T19_reportid = min_id;
break;
+ case MXT_TOUCH_PTC_KEYS_T97:
+ data->T97_reportid_min = min_id;
+ data->T97_reportid_max = max_id;
+ break;
case MXT_TOUCH_MULTITOUCHSCREEN_T100:
data->multitouch = MXT_TOUCH_MULTITOUCHSCREEN_T100;
data->T100_reportid_min = min_id;
@@ -2050,6 +2095,7 @@ static int mxt_initialize_input_device(struct mxt_data *data)
int error;
unsigned int num_mt_slots;
unsigned int mt_flags = 0;
+ int i;
switch (data->multitouch) {
case MXT_TOUCH_MULTI_T9:
@@ -2095,6 +2141,10 @@ static int mxt_initialize_input_device(struct mxt_data *data)
input_dev->open = mxt_input_open;
input_dev->close = mxt_input_close;
+ input_dev->keycode = data->t15_keymap;
+ input_dev->keycodemax = data->t15_num_keys;
+ input_dev->keycodesize = sizeof(data->t15_keymap[0]);
+
input_set_capability(input_dev, EV_KEY, BTN_TOUCH);
/* For single touch */
@@ -2162,6 +2212,13 @@ static int mxt_initialize_input_device(struct mxt_data *data)
0, 255, 0, 0);
}
+ /* For T15 and T97 Key Array */
+ if (data->T15_reportid_min || data->T97_reportid_min) {
+ for (i = 0; i < data->t15_num_keys; i++)
+ input_set_capability(input_dev,
+ EV_KEY, data->t15_keymap[i]);
+ }
+
input_set_drvdata(input_dev, data);
error = input_register_device(input_dev);
@@ -3080,8 +3137,10 @@ static void mxt_input_close(struct input_dev *dev)
static int mxt_parse_device_properties(struct mxt_data *data)
{
static const char keymap_property[] = "linux,gpio-keymap";
+ static const char buttons_property[] = "linux,keycodes";
struct device *dev = &data->client->dev;
u32 *keymap;
+ u32 *buttonmap;
int n_keys;
int error;
@@ -3111,6 +3170,32 @@ static int mxt_parse_device_properties(struct mxt_data *data)
data->t19_num_keys = n_keys;
}
+ if (device_property_present(dev, buttons_property)) {
+ n_keys = device_property_count_u32(dev, buttons_property);
+ if (n_keys <= 0) {
+ error = n_keys < 0 ? n_keys : -EINVAL;
+ dev_err(dev, "invalid/malformed '%s' property: %d\n",
+ buttons_property, error);
+ return error;
+ }
+
+ buttonmap = devm_kmalloc_array(dev, n_keys, sizeof(*buttonmap),
+ GFP_KERNEL);
+ if (!buttonmap)
+ return -ENOMEM;
+
+ error = device_property_read_u32_array(dev, buttons_property,
+ buttonmap, n_keys);
+ if (error) {
+ dev_err(dev, "failed to parse '%s' property: %d\n",
+ buttons_property, error);
+ return error;
+ }
+
+ data->t15_keymap = buttonmap;
+ data->t15_num_keys = n_keys;
+ }
+
return 0;
}
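/*
 * Illustrative sketch, not part of the patch: with the parsing above, a
 * board can wire the T15/T97 key array through the generic property.
 * Hypothetical DT fragment:
 *
 *	touchscreen@4a {
 *		compatible = "atmel,maxtouch";
 *		linux,keycodes = <KEY_VOLUMEUP>, <KEY_VOLUMEDOWN>, <KEY_HOME>;
 *	};
 */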
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index 0cd6f626adec..7cb26929dc73 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -1263,9 +1263,8 @@ static void cyttsp4_stop_wd_timer(struct cyttsp4 *cd)
* Ensure we wait until the watchdog timer
* running on a different CPU finishes
*/
- del_timer_sync(&cd->watchdog_timer);
+ timer_shutdown_sync(&cd->watchdog_timer);
cancel_work_sync(&cd->watchdog_work);
- del_timer_sync(&cd->watchdog_timer);
}
static void cyttsp4_watchdog_timer(struct timer_list *t)
diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c
index 30102cb80fac..937026178656 100644
--- a/drivers/input/touchscreen/cyttsp5.c
+++ b/drivers/input/touchscreen/cyttsp5.c
@@ -43,6 +43,7 @@
#define HID_DESC_REG 0x1
#define HID_INPUT_REG 0x3
#define HID_OUTPUT_REG 0x4
+#define HID_COMMAND_REG 0x5
#define REPORT_ID_TOUCH 0x1
#define REPORT_ID_BTN 0x3
@@ -68,6 +69,7 @@
#define HID_APP_OUTPUT_REPORT_ID 0x2F
#define HID_BL_RESPONSE_REPORT_ID 0x30
#define HID_BL_OUTPUT_REPORT_ID 0x40
+#define HID_RESPONSE_REPORT_ID 0xF0
#define HID_OUTPUT_RESPONSE_REPORT_OFFSET 2
#define HID_OUTPUT_RESPONSE_CMD_OFFSET 4
@@ -78,9 +80,15 @@
#define HID_SYSINFO_BTN_MASK GENMASK(7, 0)
#define HID_SYSINFO_MAX_BTN 8
+#define HID_CMD_SET_POWER 0x8
+
+#define HID_POWER_ON 0x0
+#define HID_POWER_SLEEP 0x1
+
#define CY_HID_OUTPUT_TIMEOUT_MS 200
#define CY_HID_OUTPUT_GET_SYSINFO_TIMEOUT_MS 3000
#define CY_HID_GET_HID_DESCRIPTOR_TIMEOUT_MS 4000
+#define CY_HID_SET_POWER_TIMEOUT 500
/* maximum number of concurrent tracks */
#define TOUCH_REPORT_SIZE 10
@@ -100,6 +108,14 @@
#define TOUCH_REPORT_USAGE_PG_MIN 0xFF010063
#define TOUCH_COL_USAGE_PG 0x000D0022
+#define SET_CMD_LOW(byte, bits) \
+ ((byte) = (((byte) & 0xF0) | ((bits) & 0x0F)))
+#define SET_CMD_HIGH(byte, bits) \
+ ((byte) = (((byte) & 0x0F) | ((bits) & 0xF0)))
+#define SET_CMD_OPCODE(byte, opcode) SET_CMD_LOW(byte, opcode)
+#define SET_CMD_REPORT_TYPE(byte, type) SET_CMD_HIGH(byte, ((type) << 4))
+#define SET_CMD_REPORT_ID(byte, id) SET_CMD_LOW(byte, id)
+
/* System Information interface definitions */
struct cyttsp5_sensing_conf_data_dev {
u8 electrodes_x;
@@ -557,10 +573,44 @@ static int cyttsp5_hid_output_get_sysinfo(struct cyttsp5 *ts)
return cyttsp5_get_sysinfo_regs(ts);
}
+static int cyttsp5_power_control(struct cyttsp5 *ts, bool on)
+{
+ u8 state = on ? HID_POWER_ON : HID_POWER_SLEEP;
+ u8 cmd[2] = { 0 };
+ int rc;
+
+ SET_CMD_REPORT_TYPE(cmd[0], 0);
+ SET_CMD_REPORT_ID(cmd[0], state);
+ SET_CMD_OPCODE(cmd[1], HID_CMD_SET_POWER);
+
+ rc = cyttsp5_write(ts, HID_COMMAND_REG, cmd, sizeof(cmd));
+ if (rc) {
+ dev_err(ts->dev, "Failed to write power command %d", rc);
+ return rc;
+ }
+
+ rc = wait_for_completion_interruptible_timeout(&ts->cmd_done,
+ msecs_to_jiffies(CY_HID_SET_POWER_TIMEOUT));
+ if (rc <= 0) {
+ dev_err(ts->dev, "HID power cmd execution timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (ts->response_buf[2] != HID_RESPONSE_REPORT_ID ||
+ (ts->response_buf[3] & 0x03) != state ||
+ (ts->response_buf[4] & 0x0f) != HID_CMD_SET_POWER) {
+ dev_err(ts->dev, "Validation of the %s response failed\n",
+ on ? "wakeup" : "sleep");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
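/*
 * Illustrative sketch, not part of the patch: byte layout of the SET_POWER
 * command built by the SET_CMD_* macros above — the low byte carries
 * report type and power state, the high byte the opcode. Hypothetical
 * expansion, assuming cmd[] starts zeroed:
 */
static void build_set_power_cmd_example(u8 cmd[2], u8 power_state)
{
	cmd[0] = (0 << 4) | (power_state & 0x0f);	/* type 0, state nibble */
	cmd[1] = 0x08;					/* HID_CMD_SET_POWER */
}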
static int cyttsp5_hid_output_bl_launch_app(struct cyttsp5 *ts)
{
int rc;
- u8 cmd[HID_OUTPUT_BL_LAUNCH_APP];
+ u8 cmd[HID_OUTPUT_BL_LAUNCH_APP_SIZE];
u16 crc;
put_unaligned_le16(HID_OUTPUT_BL_LAUNCH_APP_SIZE, cmd);
@@ -601,12 +651,7 @@ static int cyttsp5_get_hid_descriptor(struct cyttsp5 *ts,
struct cyttsp5_hid_desc *desc)
{
struct device *dev = ts->dev;
- __le16 hid_desc_register = cpu_to_le16(HID_DESC_REG);
int rc;
- u8 cmd[2];
-
- /* Set HID descriptor register */
- memcpy(cmd, &hid_desc_register, sizeof(hid_desc_register));
rc = cyttsp5_write(ts, HID_DESC_REG, NULL, 0);
if (rc) {
@@ -675,6 +720,10 @@ static irqreturn_t cyttsp5_handle_irq(int irq, void *handle)
case HID_BTN_REPORT_ID:
cyttsp5_btn_attention(ts->dev);
break;
+ case HID_RESPONSE_REPORT_ID:
+ memcpy(ts->response_buf, ts->input_buf, size);
+ complete(&ts->cmd_done);
+ break;
default:
/* It is not an input but a command response */
memcpy(ts->response_buf, ts->input_buf, size);
@@ -886,10 +935,33 @@ static const struct i2c_device_id cyttsp5_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, cyttsp5_i2c_id);
+static int __maybe_unused cyttsp5_suspend(struct device *dev)
+{
+ struct cyttsp5 *ts = dev_get_drvdata(dev);
+
+ if (!device_may_wakeup(dev))
+ cyttsp5_power_control(ts, false);
+
+ return 0;
+}
+
+static int __maybe_unused cyttsp5_resume(struct device *dev)
+{
+ struct cyttsp5 *ts = dev_get_drvdata(dev);
+
+ if (!device_may_wakeup(dev))
+ cyttsp5_power_control(ts, true);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cyttsp5_pm, cyttsp5_suspend, cyttsp5_resume);
+
static struct i2c_driver cyttsp5_i2c_driver = {
.driver = {
.name = CYTTSP5_NAME,
.of_match_table = cyttsp5_of_match,
+ .pm = &cyttsp5_pm,
},
.probe_new = cyttsp5_i2c_probe,
.id_table = cyttsp5_i2c_id,
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 24ab9e9f5b21..3a1a5e76cd68 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1241,6 +1241,7 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client)
if (tsdata->wake_gpio) {
usleep_range(5000, 6000);
gpiod_set_value_cansleep(tsdata->wake_gpio, 1);
+ usleep_range(5000, 6000);
}
if (tsdata->reset_gpio) {
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 4897fafa4204..ee4e739936dd 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -370,22 +370,33 @@ static int ili251x_firmware_update_resolution(struct device *dev)
/* The firmware update blob might have changed the resolution. */
error = priv->chip->read_reg(client, REG_PANEL_INFO, &rs, sizeof(rs));
- if (error)
- return error;
+ if (!error) {
+ resx = le16_to_cpup((__le16 *)rs);
+ resy = le16_to_cpup((__le16 *)(rs + 2));
- resx = le16_to_cpup((__le16 *)rs);
- resy = le16_to_cpup((__le16 *)(rs + 2));
+ /* The value reported by the firmware is invalid. */
+ if (!resx || resx == 0xffff || !resy || resy == 0xffff)
+ error = -EINVAL;
+ }
- /* The value reported by the firmware is invalid. */
- if (!resx || resx == 0xffff || !resy || resy == 0xffff)
- return -EINVAL;
+ /*
+ * In case of error, the firmware might be stuck in bootloader mode,
+ * e.g. after a failed firmware update. Set maximum resolution, but
+ * do not fail to probe, so the user can re-trigger the firmware
+ * update and recover the touch controller.
+ */
+ if (error) {
+ dev_warn(dev, "Invalid resolution reported by controller.\n");
+ resx = 16384;
+ resy = 16384;
+ }
input_abs_set_max(priv->input, ABS_X, resx - 1);
input_abs_set_max(priv->input, ABS_Y, resy - 1);
input_abs_set_max(priv->input, ABS_MT_POSITION_X, resx - 1);
input_abs_set_max(priv->input, ABS_MT_POSITION_Y, resy - 1);
- return 0;
+ return error;
}
static ssize_t ili251x_firmware_update_firmware_version(struct device *dev)
@@ -977,11 +988,10 @@ static int ili210x_i2c_probe(struct i2c_client *client)
if (priv->chip->has_pressure_reg)
input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xa, 0, 0);
error = ili251x_firmware_update_cached_state(dev);
- if (error) {
- dev_err(dev, "Unable to cache firmware information, err: %d\n",
- error);
- return error;
- }
+ if (error)
+ dev_warn(dev, "Unable to cache firmware information, err: %d\n",
+ error);
+
touchscreen_parse_properties(input, true, &priv->prop);
error = input_mt_init_slots(input, priv->chip->max_touches,
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
index 75c92e282fa2..19a4a085f73a 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
@@ -1035,7 +1035,6 @@ static int mdp_comp_sub_create(struct mdp_dev *mdp)
{
struct device *dev = &mdp->pdev->dev;
struct device_node *node, *parent;
- const struct mtk_mdp_driver_data *data = mdp->mdp_data;
parent = dev->of_node->parent;
@@ -1045,7 +1044,7 @@ static int mdp_comp_sub_create(struct mdp_dev *mdp)
int id, alias_id;
struct mdp_comp *comp;
- of_id = of_match_node(data->mdp_sub_comp_dt_ids, node);
+ of_id = of_match_node(mdp->mdp_data->mdp_sub_comp_dt_ids, node);
if (!of_id)
continue;
if (!of_device_is_available(node)) {
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
index 238521622b75..253e77189b69 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
@@ -378,8 +378,8 @@ static int mxc_isi_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops mxc_isi_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(mxc_isi_pm_suspend, mxc_isi_pm_resume)
- SET_RUNTIME_PM_OPS(mxc_isi_runtime_suspend, mxc_isi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(mxc_isi_pm_suspend, mxc_isi_pm_resume)
+ RUNTIME_PM_OPS(mxc_isi_runtime_suspend, mxc_isi_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
@@ -528,7 +528,7 @@ static struct platform_driver mxc_isi_driver = {
.driver = {
.of_match_table = mxc_isi_of_match,
.name = MXC_ISI_DRIVER_NAME,
- .pm = &mxc_isi_pm_ops,
+ .pm = pm_ptr(&mxc_isi_pm_ops),
}
};
module_platform_driver(mxc_isi_driver);
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
index db538f3d88ec..19e80b95ffea 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c
@@ -29,11 +29,10 @@ static inline void mxc_isi_write(struct mxc_isi_pipe *pipe, u32 reg, u32 val)
void mxc_isi_channel_set_inbuf(struct mxc_isi_pipe *pipe, dma_addr_t dma_addr)
{
- mxc_isi_write(pipe, CHNL_IN_BUF_ADDR, dma_addr);
-#if CONFIG_ARCH_DMA_ADDR_T_64BIT
+ mxc_isi_write(pipe, CHNL_IN_BUF_ADDR, lower_32_bits(dma_addr));
if (pipe->isi->pdata->has_36bit_dma)
- mxc_isi_write(pipe, CHNL_IN_BUF_XTND_ADDR, dma_addr >> 32);
-#endif
+ mxc_isi_write(pipe, CHNL_IN_BUF_XTND_ADDR,
+ upper_32_bits(dma_addr));
}
void mxc_isi_channel_set_outbuf(struct mxc_isi_pipe *pipe,
@@ -45,34 +44,36 @@ void mxc_isi_channel_set_outbuf(struct mxc_isi_pipe *pipe,
val = mxc_isi_read(pipe, CHNL_OUT_BUF_CTRL);
if (buf_id == MXC_ISI_BUF1) {
- mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_Y, dma_addrs[0]);
- mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_U, dma_addrs[1]);
- mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_V, dma_addrs[2]);
-#if CONFIG_ARCH_DMA_ADDR_T_64BIT
+ mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_Y,
+ lower_32_bits(dma_addrs[0]));
+ mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_U,
+ lower_32_bits(dma_addrs[1]));
+ mxc_isi_write(pipe, CHNL_OUT_BUF1_ADDR_V,
+ lower_32_bits(dma_addrs[2]));
if (pipe->isi->pdata->has_36bit_dma) {
mxc_isi_write(pipe, CHNL_Y_BUF1_XTND_ADDR,
- dma_addrs[0] >> 32);
+ upper_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_U_BUF1_XTND_ADDR,
- dma_addrs[1] >> 32);
+ upper_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_V_BUF1_XTND_ADDR,
- dma_addrs[2] >> 32);
+ upper_32_bits(dma_addrs[2]));
}
-#endif
val ^= CHNL_OUT_BUF_CTRL_LOAD_BUF1_ADDR;
} else {
- mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_Y, dma_addrs[0]);
- mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_U, dma_addrs[1]);
- mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_V, dma_addrs[2]);
-#if CONFIG_ARCH_DMA_ADDR_T_64BIT
+ mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_Y,
+ lower_32_bits(dma_addrs[0]));
+ mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_U,
+ lower_32_bits(dma_addrs[1]));
+ mxc_isi_write(pipe, CHNL_OUT_BUF2_ADDR_V,
+ lower_32_bits(dma_addrs[2]));
if (pipe->isi->pdata->has_36bit_dma) {
mxc_isi_write(pipe, CHNL_Y_BUF2_XTND_ADDR,
- dma_addrs[0] >> 32);
+ upper_32_bits(dma_addrs[0]));
mxc_isi_write(pipe, CHNL_U_BUF2_XTND_ADDR,
- dma_addrs[1] >> 32);
+ upper_32_bits(dma_addrs[1]));
mxc_isi_write(pipe, CHNL_V_BUF2_XTND_ADDR,
- dma_addrs[2] >> 32);
+ upper_32_bits(dma_addrs[2]));
}
-#endif
val ^= CHNL_OUT_BUF_CTRL_LOAD_BUF2_ADDR;
}
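/*
 * Illustrative sketch, not part of the patch: lower_32_bits() and
 * upper_32_bits() split a dma_addr_t safely on both 32-bit and 64-bit
 * configurations, replacing the fragile "#if CONFIG_ARCH_DMA_ADDR_T_64BIT"
 * guard (a plain #if, not #ifdef, on the config symbol). Hypothetical
 * helper:
 */
#include <linux/kernel.h>

static void split_dma_addr(dma_addr_t addr, u32 *lo, u32 *hi)
{
	*lo = lower_32_bits(addr);
	*hi = upper_32_bits(addr);	/* 0 when dma_addr_t is 32-bit */
}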
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
index 98bfd445a649..2a77353f10b5 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c
@@ -728,11 +728,9 @@ static int rvin_setup(struct rvin_dev *vin)
case V4L2_FIELD_SEQ_TB:
case V4L2_FIELD_SEQ_BT:
case V4L2_FIELD_NONE:
- vnmc = VNMC_IM_ODD_EVEN;
- progressive = true;
- break;
case V4L2_FIELD_ALTERNATE:
vnmc = VNMC_IM_ODD_EVEN;
+ progressive = true;
break;
default:
vnmc = VNMC_IM_ODD;
@@ -1312,12 +1310,23 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
}
if (rvin_scaler_needed(vin)) {
+ /* Gen3 can't scale NV12 */
+ if (vin->info->model == RCAR_GEN3 &&
+ vin->format.pixelformat == V4L2_PIX_FMT_NV12)
+ return -EPIPE;
+
if (!vin->scaler)
return -EPIPE;
} else {
- if (fmt.format.width != vin->format.width ||
- fmt.format.height != vin->format.height)
- return -EPIPE;
+ if (vin->format.pixelformat == V4L2_PIX_FMT_NV12) {
+ if (ALIGN(fmt.format.width, 32) != vin->format.width ||
+ ALIGN(fmt.format.height, 32) != vin->format.height)
+ return -EPIPE;
+ } else {
+ if (fmt.format.width != vin->format.width ||
+ fmt.format.height != vin->format.height)
+ return -EPIPE;
+ }
}
if (fmt.format.code != vin->mbus_code)
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index 025bb628aaf3..75fcba45ec1b 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -7,6 +7,7 @@
* Copyright (C) 2019-2020 Cogent Embedded, Inc.
*/
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -163,6 +164,11 @@ static const struct regmap_access_table rpcif_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(rpcif_volatile_ranges),
};
+struct rpcif_info {
+ enum rpcif_type type;
+ u8 strtim;
+};
+
struct rpcif_priv {
struct device *dev;
void __iomem *base;
@@ -171,7 +177,7 @@ struct rpcif_priv {
struct reset_control *rstc;
struct platform_device *vdev;
size_t size;
- enum rpcif_type type;
+ const struct rpcif_info *info;
enum rpcif_data_dir dir;
u8 bus_size;
u8 xfer_size;
@@ -186,6 +192,26 @@ struct rpcif_priv {
u32 ddr; /* DRDRENR or SMDRENR */
};
+static const struct rpcif_info rpcif_info_r8a7796 = {
+ .type = RPCIF_RCAR_GEN3,
+ .strtim = 6,
+};
+
+static const struct rpcif_info rpcif_info_gen3 = {
+ .type = RPCIF_RCAR_GEN3,
+ .strtim = 7,
+};
+
+static const struct rpcif_info rpcif_info_rz_g2l = {
+ .type = RPCIF_RZ_G2L,
+ .strtim = 7,
+};
+
+static const struct rpcif_info rpcif_info_gen4 = {
+ .type = RPCIF_RCAR_GEN4,
+ .strtim = 15,
+};
+
/*
* Custom accessor functions to ensure SM[RW]DR[01] are always accessed with
* proper width. Requires rpcif_priv.xfer_size to be correctly set before!
@@ -310,7 +336,7 @@ int rpcif_hw_init(struct device *dev, bool hyperflash)
if (ret)
return ret;
- if (rpc->type == RPCIF_RZ_G2L) {
+ if (rpc->info->type == RPCIF_RZ_G2L) {
ret = reset_control_reset(rpc->rstc);
if (ret)
return ret;
@@ -324,12 +350,10 @@ int rpcif_hw_init(struct device *dev, bool hyperflash)
/* DMA Transfer is not supported */
regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_HS, 0);
- if (rpc->type == RPCIF_RCAR_GEN3)
- regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
- RPCIF_PHYCNT_STRTIM(7), RPCIF_PHYCNT_STRTIM(7));
- else if (rpc->type == RPCIF_RCAR_GEN4)
- regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
- RPCIF_PHYCNT_STRTIM(15), RPCIF_PHYCNT_STRTIM(15));
+ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
+ /* create mask with all affected bits set */
+ RPCIF_PHYCNT_STRTIM(BIT(fls(rpc->info->strtim)) - 1),
+ RPCIF_PHYCNT_STRTIM(rpc->info->strtim));
regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET1, RPCIF_PHYOFFSET1_DDRTMG(3),
RPCIF_PHYOFFSET1_DDRTMG(3));
@@ -340,7 +364,7 @@ int rpcif_hw_init(struct device *dev, bool hyperflash)
regmap_update_bits(rpc->regmap, RPCIF_PHYINT,
RPCIF_PHYINT_WPVAL, 0);
- if (rpc->type == RPCIF_RZ_G2L)
+ if (rpc->info->type == RPCIF_RZ_G2L)
regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_IOFV(3) |
RPCIF_CMNCR_BSZ(3),
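/*
 * Illustrative sketch, not part of the patch: BIT(fls(x)) - 1 yields the
 * smallest all-ones mask covering x, which lets one regmap_update_bits()
 * call replace the per-SoC branches above. Hypothetical helper:
 */
#include <linux/bitops.h>

static unsigned int covering_mask(unsigned int x)
{
	return BIT(fls(x)) - 1;	/* fls(6) = 3 -> 0x7; fls(15) = 4 -> 0xf */
}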
@@ -729,9 +753,9 @@ static int rpcif_probe(struct platform_device *pdev)
rpc->dirmap = devm_ioremap_resource(dev, res);
if (IS_ERR(rpc->dirmap))
return PTR_ERR(rpc->dirmap);
- rpc->size = resource_size(res);
- rpc->type = (uintptr_t)of_device_get_match_data(dev);
+ rpc->size = resource_size(res);
+ rpc->info = of_device_get_match_data(dev);
rpc->rstc = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(rpc->rstc))
return PTR_ERR(rpc->rstc);
@@ -764,9 +788,10 @@ static int rpcif_remove(struct platform_device *pdev)
}
static const struct of_device_id rpcif_of_match[] = {
- { .compatible = "renesas,rcar-gen3-rpc-if", .data = (void *)RPCIF_RCAR_GEN3 },
- { .compatible = "renesas,rcar-gen4-rpc-if", .data = (void *)RPCIF_RCAR_GEN4 },
- { .compatible = "renesas,rzg2l-rpc-if", .data = (void *)RPCIF_RZ_G2L },
+ { .compatible = "renesas,r8a7796-rpc-if", .data = &rpcif_info_r8a7796 },
+ { .compatible = "renesas,rcar-gen3-rpc-if", .data = &rpcif_info_gen3 },
+ { .compatible = "renesas,rcar-gen4-rpc-if", .data = &rpcif_info_gen4 },
+ { .compatible = "renesas,rzg2l-rpc-if", .data = &rpcif_info_rz_g2l },
{},
};
MODULE_DEVICE_TABLE(of, rpcif_of_match);
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 00c33edb9fb9..d920c4178389 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -264,6 +264,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
goto out_put;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
blk_execute_rq(req, false);
ret = req_to_mmc_queue_req(req)->drv_op_result;
blk_mq_free_request(req);
@@ -651,6 +652,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
idatas[0] = idata;
req_to_mmc_queue_req(req)->drv_op =
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = idatas;
req_to_mmc_queue_req(req)->ioc_count = 1;
blk_execute_rq(req, false);
@@ -722,6 +724,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
}
req_to_mmc_queue_req(req)->drv_op =
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = idata;
req_to_mmc_queue_req(req)->ioc_count = n;
blk_execute_rq(req, false);
@@ -2806,6 +2809,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
if (IS_ERR(req))
return PTR_ERR(req);
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
blk_execute_rq(req, false);
ret = req_to_mmc_queue_req(req)->drv_op_result;
if (ret >= 0) {
@@ -2844,6 +2848,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
goto out_free;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
+ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
blk_execute_rq(req, false);
err = req_to_mmc_queue_req(req)->drv_op_result;
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index b24aa27da50c..d2f625054689 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -540,9 +540,11 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
if (host->mmc->caps & MMC_CAP_HW_RESET) {
priv->rst_hw = devm_reset_control_get_optional_exclusive(dev, NULL);
- if (IS_ERR(priv->rst_hw))
- return dev_err_probe(mmc_dev(host->mmc), PTR_ERR(priv->rst_hw),
- "reset controller error\n");
+ if (IS_ERR(priv->rst_hw)) {
+ ret = dev_err_probe(mmc_dev(host->mmc), PTR_ERR(priv->rst_hw),
+ "reset controller error\n");
+ goto free;
+ }
if (priv->rst_hw)
host->mmc_host_ops.card_hw_reset = sdhci_cdns_mmc_hw_reset;
}
diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig
index e5c571fd232c..80f015cf6e54 100644
--- a/drivers/mux/Kconfig
+++ b/drivers/mux/Kconfig
@@ -47,7 +47,7 @@ config MUX_GPIO
config MUX_MMIO
tristate "MMIO/Regmap register bitfield-controlled Multiplexer"
- depends on OF || COMPILE_TEST
+ depends on OF
help
MMIO/Regmap register bitfield-controlled Multiplexer controller.
diff --git a/drivers/mux/mmio.c b/drivers/mux/mmio.c
index 44a7a0e885b8..245bc07eee4b 100644
--- a/drivers/mux/mmio.c
+++ b/drivers/mux/mmio.c
@@ -131,7 +131,7 @@ static int mux_mmio_probe(struct platform_device *pdev)
static struct platform_driver mux_mmio_driver = {
.driver = {
.name = "mmio-mux",
- .of_match_table = of_match_ptr(mux_mmio_dt_ids),
+ .of_match_table = mux_mmio_dt_ids,
},
.probe = mux_mmio_probe,
};
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 7eb2ddbe9bad..a317feb8decb 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1126,8 +1126,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
}
poll:
- lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
- WQ_MEM_RECLAIM, 1);
+ lmac->check_link = alloc_ordered_workqueue("check_link", WQ_MEM_RECLAIM);
if (!lmac->check_link)
return -ENOMEM;
INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 34aebf00a512..18d4af934d8c 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -13,6 +13,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
+#include <linux/bitfield.h>
#include "igc_hw.h"
@@ -311,6 +312,33 @@ extern char igc_driver_name[];
#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+/* Rx descriptor write-back format RSS types */
+enum igc_rss_type_num {
+ IGC_RSS_TYPE_NO_HASH = 0,
+ IGC_RSS_TYPE_HASH_TCP_IPV4 = 1,
+ IGC_RSS_TYPE_HASH_IPV4 = 2,
+ IGC_RSS_TYPE_HASH_TCP_IPV6 = 3,
+ IGC_RSS_TYPE_HASH_IPV6_EX = 4,
+ IGC_RSS_TYPE_HASH_IPV6 = 5,
+ IGC_RSS_TYPE_HASH_TCP_IPV6_EX = 6,
+ IGC_RSS_TYPE_HASH_UDP_IPV4 = 7,
+ IGC_RSS_TYPE_HASH_UDP_IPV6 = 8,
+ IGC_RSS_TYPE_HASH_UDP_IPV6_EX = 9,
+ IGC_RSS_TYPE_MAX = 10,
+};
+#define IGC_RSS_TYPE_MAX_TABLE 16
+#define IGC_RSS_TYPE_MASK GENMASK(3, 0) /* 4 bits (3:0) = mask 0x0F */
+
+/* igc_rss_type - Rx descriptor RSS type field */
+static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
+{
+ /* RSS type is a 4-bit field (3:0) with values 0-9; above 9 is reserved.
+ * Accessing the same bits via u16 (wb.lower.lo_dword.hs_rss.pkt_info)
+ * is slightly slower than via u32 (wb.lower.lo_dword.data)
+ */
+ return le32_get_bits(rx_desc->wb.lower.lo_dword.data, IGC_RSS_TYPE_MASK);
+}
+
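/*
 * Illustrative sketch, not part of the patch: le32_get_bits() combines the
 * little-endian conversion with a FIELD_GET(), so the RSS-type nibble
 * drops straight out of the descriptor word. Hypothetical standalone form:
 */
#include <linux/bitfield.h>
#include <linux/types.h>

static u32 rss_type_example(__le32 lo_dword)
{
	return le32_get_bits(lo_dword, GENMASK(3, 0));	/* 0..15 */
}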
/* Interrupt defines */
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_4K_ITR 980
@@ -471,6 +499,13 @@ struct igc_rx_buffer {
};
};
+/* context wrapper around xdp_buff to provide access to descriptor metadata */
+struct igc_xdp_buff {
+ struct xdp_buff xdp;
+ union igc_adv_rx_desc *rx_desc;
+ ktime_t rx_ts; /* timestamp, valid when IGC_RXDADV_STAT_TSIP is set */
+};
+
struct igc_q_vector {
struct igc_adapter *adapter; /* backlink */
void __iomem *itr_register;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 1c4676882082..38d113b48111 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1690,14 +1690,36 @@ static void igc_rx_checksum(struct igc_ring *ring,
le32_to_cpu(rx_desc->wb.upper.status_error));
}
+/* Mapping HW RSS Type to enum pkt_hash_types */
+static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = {
+ [IGC_RSS_TYPE_NO_HASH] = PKT_HASH_TYPE_L2,
+ [IGC_RSS_TYPE_HASH_TCP_IPV4] = PKT_HASH_TYPE_L4,
+ [IGC_RSS_TYPE_HASH_IPV4] = PKT_HASH_TYPE_L3,
+ [IGC_RSS_TYPE_HASH_TCP_IPV6] = PKT_HASH_TYPE_L4,
+ [IGC_RSS_TYPE_HASH_IPV6_EX] = PKT_HASH_TYPE_L3,
+ [IGC_RSS_TYPE_HASH_IPV6] = PKT_HASH_TYPE_L3,
+ [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4,
+ [IGC_RSS_TYPE_HASH_UDP_IPV4] = PKT_HASH_TYPE_L4,
+ [IGC_RSS_TYPE_HASH_UDP_IPV6] = PKT_HASH_TYPE_L4,
+ [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4,
+ [10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */
+ [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */
+ [12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisions */
+ [13] = PKT_HASH_TYPE_NONE,
+ [14] = PKT_HASH_TYPE_NONE,
+ [15] = PKT_HASH_TYPE_NONE,
+};
+
static inline void igc_rx_hash(struct igc_ring *ring,
union igc_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- if (ring->netdev->features & NETIF_F_RXHASH)
- skb_set_hash(skb,
- le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
- PKT_HASH_TYPE_L3);
+ if (ring->netdev->features & NETIF_F_RXHASH) {
+ u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+ u32 rss_type = igc_rss_type(rx_desc);
+
+ skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]);
+ }
}
static void igc_rx_vlan(struct igc_ring *rx_ring,
@@ -2214,6 +2236,8 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
if (!count)
return ok;
+ XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff);
+
desc = IGC_RX_DESC(ring, i);
bi = &ring->rx_buffer_info[i];
i -= ring->count;
@@ -2498,8 +2522,8 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
union igc_adv_rx_desc *rx_desc;
struct igc_rx_buffer *rx_buffer;
unsigned int size, truesize;
+ struct igc_xdp_buff ctx;
ktime_t timestamp = 0;
- struct xdp_buff xdp;
int pkt_offset = 0;
void *pktbuf;
@@ -2528,18 +2552,20 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
pktbuf);
+ ctx.rx_ts = timestamp;
pkt_offset = IGC_TS_HDR_LEN;
size -= IGC_TS_HDR_LEN;
}
if (!skb) {
- xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
- xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
+ xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq);
+ xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring),
igc_rx_offset(rx_ring) + pkt_offset,
size, true);
- xdp_buff_clear_frags_flag(&xdp);
+ xdp_buff_clear_frags_flag(&ctx.xdp);
+ ctx.rx_desc = rx_desc;
- skb = igc_xdp_run_prog(adapter, &xdp);
+ skb = igc_xdp_run_prog(adapter, &ctx.xdp);
}
if (IS_ERR(skb)) {
@@ -2561,9 +2587,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
} else if (skb)
igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
- skb = igc_build_skb(rx_ring, rx_buffer, &xdp);
+ skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp);
else
- skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
+ skb = igc_construct_skb(rx_ring, rx_buffer, &ctx.xdp,
timestamp);
/* exit if we failed to retrieve a buffer */
@@ -2664,6 +2690,15 @@ static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
napi_gro_receive(&q_vector->napi, skb);
}
+static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp)
+{
+ /* The xdp_buff pointer used by the ZC code path is allocated as an
+ * xdp_buff_xsk. igc_xdp_buff shares its layout with xdp_buff_xsk, and
+ * the private igc_xdp_buff fields fall into xdp_buff_xsk->cb.
+ */
+ return (struct igc_xdp_buff *)xdp;
+}
+
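/*
 * Illustrative sketch, not part of the patch: the cast above relies on
 * xdp_buff being the first member of xdp_buff_xsk and on the
 * driver-private fields fitting inside xdp_buff_xsk->cb;
 * XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff), added earlier in the patch,
 * turns a too-large private struct into a build error instead of silent
 * memory corruption. Hypothetical layout:
 */
struct example_xdp_ctx {
	struct xdp_buff xdp;	/* must stay first for the cast to work */
	void *priv;		/* extra fields must fit in cb[] */
};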
static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
{
struct igc_adapter *adapter = q_vector->adapter;
@@ -2682,6 +2717,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
while (likely(total_packets < budget)) {
union igc_adv_rx_desc *desc;
struct igc_rx_buffer *bi;
+ struct igc_xdp_buff *ctx;
ktime_t timestamp = 0;
unsigned int size;
int res;
@@ -2699,9 +2735,13 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
bi = &ring->rx_buffer_info[ntc];
+ ctx = xsk_buff_to_igc_ctx(bi->xdp);
+ ctx->rx_desc = desc;
+
if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
bi->xdp->data);
+ ctx->rx_ts = timestamp;
bi->xdp->data += IGC_TS_HDR_LEN;
@@ -6454,6 +6494,58 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
return value;
}
+/* Mapping HW RSS Type to enum xdp_rss_hash_type */
+static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = {
+ [IGC_RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_L2,
+ [IGC_RSS_TYPE_HASH_TCP_IPV4] = XDP_RSS_TYPE_L4_IPV4_TCP,
+ [IGC_RSS_TYPE_HASH_IPV4] = XDP_RSS_TYPE_L3_IPV4,
+ [IGC_RSS_TYPE_HASH_TCP_IPV6] = XDP_RSS_TYPE_L4_IPV6_TCP,
+ [IGC_RSS_TYPE_HASH_IPV6_EX] = XDP_RSS_TYPE_L3_IPV6_EX,
+ [IGC_RSS_TYPE_HASH_IPV6] = XDP_RSS_TYPE_L3_IPV6,
+ [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
+ [IGC_RSS_TYPE_HASH_UDP_IPV4] = XDP_RSS_TYPE_L4_IPV4_UDP,
+ [IGC_RSS_TYPE_HASH_UDP_IPV6] = XDP_RSS_TYPE_L4_IPV6_UDP,
+ [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX,
+ [10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */
+ [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */
+ [12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisions */
+ [13] = XDP_RSS_TYPE_NONE,
+ [14] = XDP_RSS_TYPE_NONE,
+ [15] = XDP_RSS_TYPE_NONE,
+};
+
+static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
+{
+ const struct igc_xdp_buff *ctx = (void *)_ctx;
+
+ if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH))
+ return -ENODATA;
+
+ *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss);
+ *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)];
+
+ return 0;
+}
+
+static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
+{
+ const struct igc_xdp_buff *ctx = (void *)_ctx;
+
+ if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) {
+ *timestamp = ctx->rx_ts;
+
+ return 0;
+ }
+
+ return -ENODATA;
+}
+
+static const struct xdp_metadata_ops igc_xdp_metadata_ops = {
+ .xmo_rx_hash = igc_xdp_rx_hash,
+ .xmo_rx_timestamp = igc_xdp_rx_timestamp,
+};
+
/**
* igc_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -6527,6 +6619,7 @@ static int igc_probe(struct pci_dev *pdev,
hw->hw_addr = adapter->io_addr;
netdev->netdev_ops = &igc_netdev_ops;
+ netdev->xdp_metadata_ops = &igc_xdp_metadata_ops;
igc_ethtool_set_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
@@ -6554,6 +6647,7 @@ static int igc_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
netdev->features |= NETIF_F_TSO_ECN;
+ netdev->features |= NETIF_F_RXHASH;
netdev->features |= NETIF_F_RXCSUM;
netdev->features |= NETIF_F_HW_CSUM;
netdev->features |= NETIF_F_SCTP_CRC;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 9f673bda9dbd..0069e60afa3b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -3044,9 +3044,8 @@ static int rvu_flr_init(struct rvu *rvu)
cfg | BIT_ULL(22));
}
- rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
- WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
- 1);
+ rvu->flr_wq = alloc_ordered_workqueue("rvu_afpf_flr",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM);
if (!rvu->flr_wq)
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 18284ad75157..74c49795dc82 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -271,8 +271,7 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
{
int vf;
- pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq",
- WQ_UNBOUND | WQ_HIGHPRI, 1);
+ pf->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI);
if (!pf->flr_wq)
return -ENOMEM;
@@ -593,9 +592,8 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
if (!pf->mbox_pfvf)
return -ENOMEM;
- pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
- WQ_UNBOUND | WQ_HIGHPRI |
- WQ_MEM_RECLAIM, 1);
+ pf->mbox_pfvf_wq = alloc_ordered_workqueue("otx2_pfvf_mailbox",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM);
if (!pf->mbox_pfvf_wq)
return -ENOMEM;
@@ -1063,9 +1061,8 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
int err;
mbox->pfvf = pf;
- pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
- WQ_UNBOUND | WQ_HIGHPRI |
- WQ_MEM_RECLAIM, 1);
+ pf->mbox_wq = alloc_ordered_workqueue("otx2_pfaf_mailbox",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM);
if (!pf->mbox_wq)
return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 53366dbfbf27..7baed6bb3b72 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -297,9 +297,8 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
int err;
mbox->pfvf = vf;
- vf->mbox_wq = alloc_workqueue("otx2_vfaf_mailbox",
- WQ_UNBOUND | WQ_HIGHPRI |
- WQ_MEM_RECLAIM, 1);
+ vf->mbox_wq = alloc_ordered_workqueue("otx2_vfaf_mailbox",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM);
if (!vf->mbox_wq)
return -ENOMEM;
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 0134ebac227a..a2d242034220 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2945,7 +2945,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw)
ca8210_hw->phy->flags =
WPAN_PHY_FLAG_TXPOWER |
WPAN_PHY_FLAG_CCA_ED_LEVEL |
- WPAN_PHY_FLAG_CCA_MODE;
+ WPAN_PHY_FLAG_CCA_MODE |
+ WPAN_PHY_FLAG_DATAGRAMS_ONLY;
}
/**
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 8445c2189d11..31cba9aa7636 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -685,7 +685,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
{
struct nlattr *edge_attrs[MAC802154_HWSIM_EDGE_ATTR_MAX + 1];
- struct hwsim_edge_info *einfo;
+ struct hwsim_edge_info *einfo, *einfo_old;
struct hwsim_phy *phy_v0;
struct hwsim_edge *e;
u32 v0, v1;
@@ -723,8 +723,10 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
list_for_each_entry_rcu(e, &phy_v0->edges, list) {
if (e->endpoint->idx == v1) {
einfo->lqi = lqi;
- rcu_assign_pointer(e->info, einfo);
+ einfo_old = rcu_replace_pointer(e->info, einfo,
+ lockdep_is_held(&hwsim_phys_lock));
rcu_read_unlock();
+ kfree_rcu(einfo_old, rcu);
mutex_unlock(&hwsim_phys_lock);
return 0;
}
diff --git a/drivers/net/mdio/mdio-mvusb.c b/drivers/net/mdio/mdio-mvusb.c
index 68fc55906e78..554837c21e73 100644
--- a/drivers/net/mdio/mdio-mvusb.c
+++ b/drivers/net/mdio/mdio-mvusb.c
@@ -67,6 +67,7 @@ static int mvusb_mdio_probe(struct usb_interface *interface,
struct device *dev = &interface->dev;
struct mvusb_mdio *mvusb;
struct mii_bus *mdio;
+ int ret;
mdio = devm_mdiobus_alloc_size(dev, sizeof(*mvusb));
if (!mdio)
@@ -87,7 +88,15 @@ static int mvusb_mdio_probe(struct usb_interface *interface,
mdio->write = mvusb_mdio_write;
usb_set_intfdata(interface, mvusb);
- return of_mdiobus_register(mdio, dev->of_node);
+ ret = of_mdiobus_register(mdio, dev->of_node);
+ if (ret)
+ goto put_dev;
+
+ return 0;
+
+put_dev:
+ usb_put_dev(mvusb->udev);
+ return ret;
}
static void mvusb_mdio_disconnect(struct usb_interface *interface)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index b281850fbf7a..59e14b398295 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3576,7 +3576,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
init_waitqueue_head(&trans_pcie->imr_waitq);
trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
- WQ_HIGHPRI | WQ_UNBOUND, 1);
+ WQ_HIGHPRI | WQ_UNBOUND, 0);
if (!trans_pcie->rba.alloc_wq) {
ret = -ENOMEM;
goto out_free_trans;
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.h b/drivers/net/wireless/marvell/mwifiex/11n.h
index 94b5e3e4ba08..7738ebe1fec1 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n.h
@@ -102,14 +102,14 @@ static inline u8 mwifiex_space_avail_for_new_ba_stream(
{
struct mwifiex_private *priv;
u8 i;
- u32 ba_stream_num = 0, ba_stream_max;
+ size_t ba_stream_num = 0, ba_stream_max;
ba_stream_max = MWIFIEX_MAX_TX_BASTREAM_SUPPORTED;
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
if (priv)
- ba_stream_num += mwifiex_wmm_list_len(
+ ba_stream_num += list_count_nodes(
&priv->tx_ba_stream_tbl_ptr);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.h b/drivers/net/wireless/marvell/mwifiex/wmm.h
index 4f53a271dae0..d7659e688933 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.h
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.h
@@ -39,21 +39,6 @@ mwifiex_get_tid(struct mwifiex_ra_list_tbl *ptr)
}
/*
- * This function gets the length of a list.
- */
-static inline int
-mwifiex_wmm_list_len(struct list_head *head)
-{
- struct list_head *pos;
- int count = 0;
-
- list_for_each(pos, head)
- ++count;
-
- return count;
-}
-
-/*
* This function checks if a RA list is empty or not.
*/
static inline u8
diff --git a/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
index 230b0e1061a7..dbddf256921b 100644
--- a/drivers/net/wireless/mediatek/mt7601u/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
@@ -127,8 +127,6 @@ void mt7601u_init_debugfs(struct mt7601u_dev *dev)
struct dentry *dir;
dir = debugfs_create_dir("mt7601u", dev->hw->wiphy->debugfsdir);
- if (!dir)
- return;
debugfs_create_u8("temperature", 0400, dir, &dev->raw_temp);
debugfs_create_u32("temp_mode", 0400, dir, &dev->temp_mode);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 8eafbf1cee71..2c756640f59d 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1280,6 +1280,9 @@ struct rtl8xxxu_rfregs {
#define H2C_JOIN_BSS_DISCONNECT 0
#define H2C_JOIN_BSS_CONNECT 1
+#define H2C_MACID_ROLE_STA 1
+#define H2C_MACID_ROLE_AP 2
+
/*
* H2C (firmware) commands differ between the older generation chips
* 8188[cr]u, 819[12]cu, and 8723au, and the more recent chips 8723bu,
@@ -1727,6 +1730,8 @@ struct rtl8xxxu_cfo_tracking {
};
#define RTL8XXXU_HW_LED_CONTROL 2
+#define RTL8XXXU_MAX_MAC_ID_NUM 128
+#define RTL8XXXU_BC_MC_MACID 0
struct rtl8xxxu_priv {
struct ieee80211_hw *hw;
@@ -1850,6 +1855,7 @@ struct rtl8xxxu_priv {
struct delayed_work ra_watchdog;
struct work_struct c2hcmd_work;
struct sk_buff_head c2hcmd_queue;
+ struct work_struct update_beacon_work;
struct rtl8xxxu_btcoex bt_coex;
struct rtl8xxxu_ra_report ra_report;
struct rtl8xxxu_cfo_tracking cfo_tracking;
@@ -1858,6 +1864,14 @@ struct rtl8xxxu_priv {
bool led_registered;
char led_name[32];
struct led_classdev led_cdev;
+ DECLARE_BITMAP(mac_id_map, RTL8XXXU_MAX_MAC_ID_NUM);
+};
+
+struct rtl8xxxu_sta_info {
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+
+ u8 macid;
};
struct rtl8xxxu_rx_urb {
@@ -1902,15 +1916,16 @@ struct rtl8xxxu_fileops {
void (*set_tx_power) (struct rtl8xxxu_priv *priv, int channel,
bool ht40);
void (*update_rate_mask) (struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi, int txbw_40mhz);
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz,
+ u8 macid);
void (*report_connect) (struct rtl8xxxu_priv *priv,
- u8 macid, bool connect);
+ u8 macid, u8 role, bool connect);
void (*report_rssi) (struct rtl8xxxu_priv *priv, u8 macid, u8 rssi);
void (*fill_txdesc) (struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *tx_info,
struct rtl8xxxu_txdesc32 *tx_desc, bool sgi,
bool short_preamble, bool ampdu_enable,
- u32 rts_rate);
+ u32 rts_rate, u8 macid);
void (*set_crystal_cap) (struct rtl8xxxu_priv *priv, u8 crystal_cap);
s8 (*cck_rssi) (struct rtl8xxxu_priv *priv, struct rtl8723au_phy_stats *phy_stats);
int (*led_classdev_brightness_set) (struct led_classdev *led_cdev,
@@ -1928,6 +1943,8 @@ struct rtl8xxxu_fileops {
u8 init_reg_hmtfr:1;
u8 ampdu_max_time;
u8 ustime_tsf_edca;
+ u8 supports_ap:1;
+ u16 max_macid_num;
u32 adda_1t_init;
u32 adda_1t_path_on;
u32 adda_2t_path_on_a;
@@ -2021,13 +2038,13 @@ void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw);
void rtl8xxxu_gen1_usb_quirks(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen2_usb_quirks(struct rtl8xxxu_priv *priv);
void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi, int txbw_40mhz);
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz, u8 macid);
void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi, int txbw_40mhz);
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz, u8 macid);
void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
- u8 macid, bool connect);
+ u8 macid, u8 role, bool connect);
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
- u8 macid, bool connect);
+ u8 macid, u8 role, bool connect);
void rtl8xxxu_gen1_report_rssi(struct rtl8xxxu_priv *priv, u8 macid, u8 rssi);
void rtl8xxxu_gen2_report_rssi(struct rtl8xxxu_priv *priv, u8 macid, u8 rssi);
void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv);
@@ -2056,17 +2073,17 @@ void rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *tx_info,
struct rtl8xxxu_txdesc32 *tx_desc, bool sgi,
bool short_preamble, bool ampdu_enable,
- u32 rts_rate);
+ u32 rts_rate, u8 macid);
void rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *tx_info,
struct rtl8xxxu_txdesc32 *tx_desc32, bool sgi,
bool short_preamble, bool ampdu_enable,
- u32 rts_rate);
+ u32 rts_rate, u8 macid);
void rtl8xxxu_fill_txdesc_v3(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *tx_info,
struct rtl8xxxu_txdesc32 *tx_desc32, bool sgi,
bool short_preamble, bool ampdu_enable,
- u32 rts_rate);
+ u32 rts_rate, u8 macid);
void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5);
void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
index 8986783ae8fa..6d0f975f891b 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
@@ -1794,7 +1794,8 @@ static void rtl8188e_arfb_refresh(struct rtl8xxxu_ra_info *ra)
static void
rtl8188e_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi, int txbw_40mhz)
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz,
+ u8 macid)
{
struct rtl8xxxu_ra_info *ra = &priv->ra_info;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
index dbdfd7787465..71b7f0d31bf4 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
@@ -1748,6 +1748,8 @@ struct rtl8xxxu_fileops rtl8188fu_fops = {
.init_reg_hmtfr = 1,
.ampdu_max_time = 0x70,
.ustime_tsf_edca = 0x28,
+ .supports_ap = 1,
+ .max_macid_num = 16,
.adda_1t_init = 0x03c00014,
.adda_1t_path_on = 0x03c00014,
.trxff_boundary = 0x3f7f,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index fd8c8c6d53d6..37df47c27a69 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1185,6 +1185,20 @@ static void rtl8xxxu_stop_tx_beacon(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_TBTT_PROHIBIT + 2, val8);
}
+static void rtl8xxxu_start_tx_beacon(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+
+ val8 = rtl8xxxu_read8(priv, REG_FWHW_TXQ_CTRL + 2);
+ val8 |= EN_BCNQ_DL >> 16;
+ rtl8xxxu_write8(priv, REG_FWHW_TXQ_CTRL + 2, val8);
+
+ rtl8xxxu_write8(priv, REG_TBTT_PROHIBIT + 1, 0x80);
+ val8 = rtl8xxxu_read8(priv, REG_TBTT_PROHIBIT + 2);
+ val8 &= 0xF0;
+ rtl8xxxu_write8(priv, REG_TBTT_PROHIBIT + 2, val8);
+}
+
/*
 * The rtl8723a has 3 channel groups for its efuse settings. It only
@@ -3963,6 +3977,34 @@ void rtl8xxxu_init_burst(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
}
+static u8 rtl8xxxu_acquire_macid(struct rtl8xxxu_priv *priv)
+{
+ u8 macid;
+
+ macid = find_first_zero_bit(priv->mac_id_map, RTL8XXXU_MAX_MAC_ID_NUM);
+ if (macid < RTL8XXXU_MAX_MAC_ID_NUM)
+ set_bit(macid, priv->mac_id_map);
+
+ return macid;
+}
+
+static void rtl8xxxu_release_macid(struct rtl8xxxu_priv *priv, u8 macid)
+{
+ clear_bit(macid, priv->mac_id_map);
+}
+
+static inline u8 rtl8xxxu_get_macid(struct rtl8xxxu_priv *priv,
+ struct ieee80211_sta *sta)
+{
+ struct rtl8xxxu_sta_info *sta_info;
+
+ if (!priv->vif || priv->vif->type == NL80211_IFTYPE_STATION || !sta)
+ return 0;
+
+ sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ return sta_info->macid;
+}
+
static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
@@ -4432,6 +4474,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
if (priv->rtl_chip == RTL8188E)
rtl8188e_ra_info_init_all(&priv->ra_info);
+ set_bit(RTL8XXXU_BC_MC_MACID, priv->mac_id_map);
+
exit:
return ret;
}
@@ -4489,6 +4533,16 @@ int rtl8xxxu_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
return 0;
}
+static int rtl8xxxu_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ bool set)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+
+ schedule_work(&priv->update_beacon_work);
+
+ return 0;
+}
+
static void rtl8xxxu_sw_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, const u8 *mac)
{
@@ -4512,7 +4566,8 @@ static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw,
}
void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi, int txbw_40mhz)
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz,
+ u8 macid)
{
struct h2c_cmd h2c;
@@ -4532,7 +4587,8 @@ void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
}
void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, u8 rateid, int sgi, int txbw_40mhz)
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz,
+ u8 macid)
{
struct h2c_cmd h2c;
u8 bw;
@@ -4549,6 +4605,7 @@ void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
h2c.b_macid_cfg.ramask1 = (ramask >> 8) & 0xff;
h2c.b_macid_cfg.ramask2 = (ramask >> 16) & 0xff;
h2c.b_macid_cfg.ramask3 = (ramask >> 24) & 0xff;
+ h2c.b_macid_cfg.macid = macid;
h2c.b_macid_cfg.data1 = rateid;
if (sgi)
@@ -4562,7 +4619,7 @@ void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
}
void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
- u8 macid, bool connect)
+ u8 macid, u8 role, bool connect)
{
struct h2c_cmd h2c;
@@ -4579,7 +4636,7 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
}
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
- u8 macid, bool connect)
+ u8 macid, u8 role, bool connect)
{
/*
* The firmware turns on the rate control when it knows it's
@@ -4595,6 +4652,9 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
else
h2c.media_status_rpt.parm &= ~BIT(0);
+ h2c.media_status_rpt.parm |= ((role << 4) & 0xf0);
+ h2c.media_status_rpt.macid = macid;
+
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
}
@@ -4911,7 +4971,8 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
priv->vif = vif;
priv->rssi_level = RTL8XXXU_RATR_STA_INIT;
- priv->fops->update_rate_mask(priv, ramask, 0, sgi, bw == RATE_INFO_BW_40);
+ priv->fops->update_rate_mask(priv, ramask, 0, sgi,
+ bw == RATE_INFO_BW_40, 0);
rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
@@ -4921,13 +4982,13 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rtl8xxxu_write16(priv, REG_BCN_PSR_RPT,
0xc000 | vif->cfg.aid);
- priv->fops->report_connect(priv, 0, true);
+ priv->fops->report_connect(priv, 0, H2C_MACID_ROLE_AP, true);
} else {
val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
val8 |= BEACON_DISABLE_TSF_UPDATE;
rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
- priv->fops->report_connect(priv, 0, false);
+ priv->fops->report_connect(priv, 0, H2C_MACID_ROLE_AP, false);
}
}
@@ -4964,10 +5025,35 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
dev_dbg(dev, "Changed BASIC_RATES!\n");
rtl8xxxu_set_basic_rates(priv, bss_conf->basic_rates);
}
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ if (bss_conf->enable_beacon)
+ rtl8xxxu_start_tx_beacon(priv);
+ else
+ rtl8xxxu_stop_tx_beacon(priv);
+ }
+
+ if (changed & BSS_CHANGED_BEACON)
+ schedule_work(&priv->update_beacon_work);
+
error:
return;
}
+static int rtl8xxxu_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct device *dev = &priv->udev->dev;
+
+ dev_dbg(dev, "Start AP mode\n");
+ rtl8xxxu_set_bssid(priv, vif->bss_conf.bssid);
+ rtl8xxxu_write16(priv, REG_BCN_INTERVAL, vif->bss_conf.beacon_int);
+ priv->fops->report_connect(priv, RTL8XXXU_BC_MC_MACID, 0, true);
+
+ return 0;
+}
+
static u32 rtl8xxxu_80211_to_rtl_queue(u32 queue)
{
u32 rtlqueue;
@@ -4996,7 +5082,9 @@ static u32 rtl8xxxu_queue_select(struct ieee80211_hdr *hdr, struct sk_buff *skb)
{
u32 queue;
- if (ieee80211_is_mgmt(hdr->frame_control))
+ if (unlikely(ieee80211_is_beacon(hdr->frame_control)))
+ queue = TXDESC_QUEUE_BEACON;
+ else if (ieee80211_is_mgmt(hdr->frame_control))
queue = TXDESC_QUEUE_MGNT;
else
queue = rtl8xxxu_80211_to_rtl_queue(skb_get_queue_mapping(skb));
@@ -5159,23 +5247,16 @@ void
rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *tx_info,
struct rtl8xxxu_txdesc32 *tx_desc, bool sgi,
- bool short_preamble, bool ampdu_enable, u32 rts_rate)
+ bool short_preamble, bool ampdu_enable, u32 rts_rate,
+ u8 macid)
{
- struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
u8 *qc = ieee80211_get_qos_ctl(hdr);
u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- u32 rate;
- u16 rate_flags = tx_info->control.rates[0].flags;
+ u32 rate = 0;
u16 seq_number;
- if (rate_flags & IEEE80211_TX_RC_MCS &&
- !ieee80211_is_mgmt(hdr->frame_control))
- rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
- else
- rate = tx_rate->hw_value;
-
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
dev_info(dev, "%s: TX rate: %d, pkt size %u\n",
__func__, rate, le16_to_cpu(tx_desc->pkt_size));
@@ -5214,10 +5295,10 @@ rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
* rts_rate is zero if RTS/CTS or CTS to SELF are not enabled
*/
tx_desc->txdw4 |= cpu_to_le32(rts_rate << TXDESC32_RTS_RATE_SHIFT);
- if (ampdu_enable || (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
+ if (ampdu_enable || tx_info->control.use_rts) {
tx_desc->txdw4 |= cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
- } else if (rate_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ } else if (tx_info->control.use_cts_prot) {
tx_desc->txdw4 |= cpu_to_le32(TXDESC32_CTS_SELF_ENABLE);
tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
}
@@ -5231,30 +5312,25 @@ void
rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *tx_info,
struct rtl8xxxu_txdesc32 *tx_desc32, bool sgi,
- bool short_preamble, bool ampdu_enable, u32 rts_rate)
+ bool short_preamble, bool ampdu_enable, u32 rts_rate,
+ u8 macid)
{
- struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
struct rtl8xxxu_txdesc40 *tx_desc40;
u8 *qc = ieee80211_get_qos_ctl(hdr);
u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- u32 rate;
- u16 rate_flags = tx_info->control.rates[0].flags;
+ u32 rate = 0;
u16 seq_number;
tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc32;
- if (rate_flags & IEEE80211_TX_RC_MCS &&
- !ieee80211_is_mgmt(hdr->frame_control))
- rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
- else
- rate = tx_rate->hw_value;
-
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
dev_info(dev, "%s: TX rate: %d, pkt size %u\n",
__func__, rate, le16_to_cpu(tx_desc40->pkt_size));
+ tx_desc40->txdw1 |= cpu_to_le32(macid << TXDESC40_MACID_SHIFT);
+
seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
tx_desc40->txdw4 = cpu_to_le32(rate);
@@ -5278,17 +5354,21 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
tx_desc40->txdw4 |= cpu_to_le32(TXDESC40_RETRY_LIMIT_ENABLE);
}
+ if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+ tx_desc40->txdw8 |= cpu_to_le32(TXDESC40_HW_SEQ_ENABLE);
+
if (short_preamble)
tx_desc40->txdw5 |= cpu_to_le32(TXDESC40_SHORT_PREAMBLE);
tx_desc40->txdw4 |= cpu_to_le32(rts_rate << TXDESC40_RTS_RATE_SHIFT);
+
/*
* rts_rate is zero if RTS/CTS or CTS to SELF are not enabled
*/
- if (ampdu_enable || (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
+ if (ampdu_enable || tx_info->control.use_rts) {
tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE);
tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE);
- } else if (rate_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ } else if (tx_info->control.use_cts_prot) {
/*
* For some reason the vendor driver doesn't set
* TXDESC40_HW_RTS_ENABLE for CTS to SELF
@@ -5306,24 +5386,17 @@ void
rtl8xxxu_fill_txdesc_v3(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *tx_info,
struct rtl8xxxu_txdesc32 *tx_desc, bool sgi,
- bool short_preamble, bool ampdu_enable, u32 rts_rate)
+ bool short_preamble, bool ampdu_enable, u32 rts_rate,
+ u8 macid)
{
- struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
struct rtl8xxxu_ra_info *ra = &priv->ra_info;
u8 *qc = ieee80211_get_qos_ctl(hdr);
u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- u32 rate;
- u16 rate_flags = tx_info->control.rates[0].flags;
+ u32 rate = 0;
u16 seq_number;
- if (rate_flags & IEEE80211_TX_RC_MCS &&
- !ieee80211_is_mgmt(hdr->frame_control))
- rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
- else
- rate = tx_rate->hw_value;
-
seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
if (ieee80211_is_data(hdr->frame_control)) {
@@ -5376,10 +5449,10 @@ rtl8xxxu_fill_txdesc_v3(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
* rts_rate is zero if RTS/CTS or CTS to SELF are not enabled
*/
tx_desc->txdw4 |= cpu_to_le32(rts_rate << TXDESC32_RTS_RATE_SHIFT);
- if (ampdu_enable || (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
+ if (ampdu_enable || tx_info->control.use_rts) {
tx_desc->txdw4 |= cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
- } else if (rate_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ } else if (tx_info->control.use_cts_prot) {
tx_desc->txdw4 |= cpu_to_le32(TXDESC32_CTS_SELF_ENABLE);
tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
}
@@ -5403,8 +5476,8 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct device *dev = &priv->udev->dev;
u32 queue, rts_rate;
u16 pktlen = skb->len;
- u16 rate_flag = tx_info->control.rates[0].flags;
int tx_desc_size = priv->fops->tx_desc_size;
+ u8 macid;
int ret;
bool ampdu_enable, sgi = false, short_preamble = false;
@@ -5487,26 +5560,29 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
}
}
- if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
- (ieee80211_is_data_qos(hdr->frame_control) &&
- sta && sta->deflink.ht_cap.cap &
- (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20)))
+ if (ieee80211_is_data_qos(hdr->frame_control) &&
+ sta && sta->deflink.ht_cap.cap &
+ (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
sgi = true;
- if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
- (sta && vif && vif->bss_conf.use_short_preamble))
+ if (sta && vif && vif->bss_conf.use_short_preamble)
short_preamble = true;
- if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS)
- rts_rate = ieee80211_get_rts_cts_rate(hw, tx_info)->hw_value;
- else if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT)
- rts_rate = ieee80211_get_rts_cts_rate(hw, tx_info)->hw_value;
+ if (skb->len > hw->wiphy->rts_threshold)
+ tx_info->control.use_rts = true;
+
+ if (sta && vif && vif->bss_conf.use_cts_prot)
+ tx_info->control.use_cts_prot = true;
+
+ if (ampdu_enable || tx_info->control.use_rts ||
+ tx_info->control.use_cts_prot)
+ rts_rate = DESC_RATE_24M;
else
rts_rate = 0;
-
+ macid = rtl8xxxu_get_macid(priv, sta);
priv->fops->fill_txdesc(hw, hdr, tx_info, tx_desc, sgi, short_preamble,
- ampdu_enable, rts_rate);
+ ampdu_enable, rts_rate, macid);
rtl8xxxu_calc_tx_desc_csum(tx_desc);
@@ -5529,6 +5605,55 @@ error:
dev_kfree_skb(skb);
}
+static void rtl8xxxu_send_beacon_frame(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct sk_buff *skb = ieee80211_beacon_get(hw, vif, 0);
+ struct device *dev = &priv->udev->dev;
+ int retry;
+ u8 val8;
+
+ /* BCN_VALID, write 1 to clear, cleared by SW */
+ val8 = rtl8xxxu_read8(priv, REG_TDECTRL + 2);
+ val8 |= BIT_BCN_VALID >> 16;
+ rtl8xxxu_write8(priv, REG_TDECTRL + 2, val8);
+
+ /* SW_BCN_SEL - Port0 */
+ val8 = rtl8xxxu_read8(priv, REG_DWBCN1_CTRL_8723B + 2);
+ val8 &= ~(BIT_SW_BCN_SEL >> 16);
+ rtl8xxxu_write8(priv, REG_DWBCN1_CTRL_8723B + 2, val8);
+
+ if (skb)
+ rtl8xxxu_tx(hw, NULL, skb);
+
+ retry = 100;
+ do {
+ val8 = rtl8xxxu_read8(priv, REG_TDECTRL + 2);
+ if (val8 & (BIT_BCN_VALID >> 16))
+ break;
+ usleep_range(10, 20);
+ } while (--retry);
+
+ if (!retry)
+ dev_err(dev, "%s: Failed to read beacon valid bit\n", __func__);
+}
+
+static void rtl8xxxu_update_beacon_work_callback(struct work_struct *work)
+{
+ struct rtl8xxxu_priv *priv =
+ container_of(work, struct rtl8xxxu_priv, update_beacon_work);
+ struct ieee80211_hw *hw = priv->hw;
+ struct ieee80211_vif *vif = priv->vif;
+
+ if (!vif) {
+ WARN_ONCE(true, "no vif to update beacon\n");
+ return;
+ }
+
+ rtl8xxxu_send_beacon_frame(hw, vif);
+}
+
void rtl8723au_rx_parse_phystats(struct rtl8xxxu_priv *priv,
struct ieee80211_rx_status *rx_status,
struct rtl8723au_phy_stats *phy_stats,
@@ -6197,61 +6322,98 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
{
struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
- struct rtl8xxxu_rxdesc24 *rx_desc =
- (struct rtl8xxxu_rxdesc24 *)skb->data;
+ struct ieee80211_rx_status *rx_status;
+ struct rtl8xxxu_rxdesc24 *rx_desc;
struct rtl8723au_phy_stats *phy_stats;
- __le32 *_rx_desc_le = (__le32 *)skb->data;
- u32 *_rx_desc = (u32 *)skb->data;
+ struct sk_buff *next_skb = NULL;
+ __le32 *_rx_desc_le;
+ u32 *_rx_desc;
int drvinfo_sz, desc_shift;
- int i;
+ int i, pkt_len, urb_len, pkt_offset;
+
+ urb_len = skb->len;
+
+ if (urb_len < sizeof(struct rtl8xxxu_rxdesc24)) {
+ kfree_skb(skb);
+ return RX_TYPE_ERROR;
+ }
- for (i = 0; i < (sizeof(struct rtl8xxxu_rxdesc24) / sizeof(u32)); i++)
- _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
+ do {
+ rx_desc = (struct rtl8xxxu_rxdesc24 *)skb->data;
+ _rx_desc_le = (__le32 *)skb->data;
+ _rx_desc = (u32 *)skb->data;
- memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+ for (i = 0; i < (sizeof(struct rtl8xxxu_rxdesc24) / sizeof(u32)); i++)
+ _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
- skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc24));
+ pkt_len = rx_desc->pktlen;
- phy_stats = (struct rtl8723au_phy_stats *)skb->data;
+ drvinfo_sz = rx_desc->drvinfo_sz * 8;
+ desc_shift = rx_desc->shift;
+ pkt_offset = roundup(pkt_len + drvinfo_sz + desc_shift +
+ sizeof(struct rtl8xxxu_rxdesc24), 8);
- drvinfo_sz = rx_desc->drvinfo_sz * 8;
- desc_shift = rx_desc->shift;
- skb_pull(skb, drvinfo_sz + desc_shift);
+ /*
+ * Only clone the skb if there's enough data at the end to
+ * at least cover the rx descriptor
+ */
+ if (urb_len >= (pkt_offset + sizeof(struct rtl8xxxu_rxdesc24)))
+ next_skb = skb_clone(skb, GFP_ATOMIC);
- if (rx_desc->rpt_sel) {
- struct device *dev = &priv->udev->dev;
- dev_dbg(dev, "%s: C2H packet\n", __func__);
- rtl8723bu_handle_c2h(priv, skb);
- return RX_TYPE_C2H;
- }
+ rx_status = IEEE80211_SKB_RXCB(skb);
+ memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
- if (rx_desc->phy_stats)
- priv->fops->parse_phystats(priv, rx_status, phy_stats,
- rx_desc->rxmcs, (struct ieee80211_hdr *)skb->data,
- rx_desc->crc32 || rx_desc->icverr);
+ skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc24));
- rx_status->mactime = rx_desc->tsfl;
- rx_status->flag |= RX_FLAG_MACTIME_START;
+ phy_stats = (struct rtl8723au_phy_stats *)skb->data;
- if (!rx_desc->swdec)
- rx_status->flag |= RX_FLAG_DECRYPTED;
- if (rx_desc->crc32)
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (rx_desc->bw)
- rx_status->bw = RATE_INFO_BW_40;
+ skb_pull(skb, drvinfo_sz + desc_shift);
- if (rx_desc->rxmcs >= DESC_RATE_MCS0) {
- rx_status->encoding = RX_ENC_HT;
- rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
- } else {
- rx_status->rate_idx = rx_desc->rxmcs;
- }
+ skb_trim(skb, pkt_len);
- rx_status->freq = hw->conf.chandef.chan->center_freq;
- rx_status->band = hw->conf.chandef.chan->band;
+ if (rx_desc->rpt_sel) {
+ struct device *dev = &priv->udev->dev;
+ dev_dbg(dev, "%s: C2H packet\n", __func__);
+ rtl8723bu_handle_c2h(priv, skb);
+ } else {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (rx_desc->phy_stats)
+ priv->fops->parse_phystats(priv, rx_status, phy_stats,
+ rx_desc->rxmcs, hdr,
+ rx_desc->crc32 || rx_desc->icverr);
+
+ rx_status->mactime = rx_desc->tsfl;
+ rx_status->flag |= RX_FLAG_MACTIME_START;
+
+ if (!rx_desc->swdec)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ if (rx_desc->crc32)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (rx_desc->bw)
+ rx_status->bw = RATE_INFO_BW_40;
+
+ if (rx_desc->rxmcs >= DESC_RATE_MCS0) {
+ rx_status->encoding = RX_ENC_HT;
+ rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
+ } else {
+ rx_status->rate_idx = rx_desc->rxmcs;
+ }
+
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = hw->conf.chandef.chan->band;
+
+ ieee80211_rx_irqsafe(hw, skb);
+ }
+
+ skb = next_skb;
+ if (skb)
+ skb_pull(next_skb, pkt_offset);
+
+ urb_len -= pkt_offset;
+ next_skb = NULL;
+ } while (skb && urb_len >= sizeof(struct rtl8xxxu_rxdesc24));
- ieee80211_rx_irqsafe(hw, skb);
return RX_TYPE_DATA_PKT;
}
@@ -6281,7 +6443,6 @@ static void rtl8xxxu_rx_complete(struct urb *urb)
cleanup:
usb_free_urb(urb);
dev_kfree_skb(skb);
- return;
}
static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv,
@@ -6371,12 +6532,13 @@ static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
int ret;
u8 val8;
+ if (!priv->vif)
+ priv->vif = vif;
+ else
+ return -EOPNOTSUPP;
+
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- if (!priv->vif)
- priv->vif = vif;
- else
- return -EOPNOTSUPP;
rtl8xxxu_stop_tx_beacon(priv);
val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
@@ -6385,11 +6547,33 @@ static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
ret = 0;
break;
+ case NL80211_IFTYPE_AP:
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL,
+ BEACON_DISABLE_TSF_UPDATE | BEACON_CTRL_MBSSID);
+ rtl8xxxu_write8(priv, REG_ATIMWND, 0x0c); /* 12ms */
+ rtl8xxxu_write16(priv, REG_TSFTR_SYN_OFFSET, 0x7fff); /* ~32ms */
+ rtl8xxxu_write8(priv, REG_DUAL_TSF_RST, DUAL_TSF_RESET_TSF0);
+
+ /* enable BCN0 function */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL,
+ BEACON_DISABLE_TSF_UPDATE |
+ BEACON_FUNCTION_ENABLE | BEACON_CTRL_MBSSID |
+ BEACON_CTRL_TX_BEACON_RPT);
+
+ /* select BCN on port 0 */
+ val8 = rtl8xxxu_read8(priv, REG_CCK_CHECK);
+ val8 &= ~BIT_BCN_PORT_SEL;
+ rtl8xxxu_write8(priv, REG_CCK_CHECK, val8);
+
+ ret = 0;
+ break;
default:
ret = -EOPNOTSUPP;
}
rtl8xxxu_set_linktype(priv, vif->type);
+ ether_addr_copy(priv->mac_addr, vif->addr);
+ rtl8xxxu_set_mac(priv);
return ret;
}
@@ -6520,22 +6704,22 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
*/
if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
- rcr &= ~RCR_CHECK_BSSID_BEACON;
+ rcr &= ~(RCR_CHECK_BSSID_BEACON | RCR_CHECK_BSSID_MATCH);
else
- rcr |= RCR_CHECK_BSSID_BEACON;
+ rcr |= RCR_CHECK_BSSID_BEACON | RCR_CHECK_BSSID_MATCH;
+
+ if (priv->vif && priv->vif->type == NL80211_IFTYPE_AP)
+ rcr &= ~RCR_CHECK_BSSID_MATCH;
if (*total_flags & FIF_CONTROL)
rcr |= RCR_ACCEPT_CTRL_FRAME;
else
rcr &= ~RCR_ACCEPT_CTRL_FRAME;
- if (*total_flags & FIF_OTHER_BSS) {
+ if (*total_flags & FIF_OTHER_BSS)
rcr |= RCR_ACCEPT_AP;
- rcr &= ~RCR_CHECK_BSSID_MATCH;
- } else {
+ else
rcr &= ~RCR_ACCEPT_AP;
- rcr |= RCR_CHECK_BSSID_MATCH;
- }
if (*total_flags & FIF_PSPOLL)
rcr |= RCR_ACCEPT_PM;
@@ -6555,7 +6739,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
static int rtl8xxxu_set_rts_threshold(struct ieee80211_hw *hw, u32 rts)
{
- if (rts > 2347)
+ if (rts > 2347 && rts != (u32)-1)
return -EINVAL;
return 0;
@@ -6704,7 +6888,8 @@ static u8 rtl8xxxu_signal_to_snr(int signal)
}
static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
- int signal, struct ieee80211_sta *sta)
+ int signal, struct ieee80211_sta *sta,
+ bool force)
{
struct ieee80211_hw *hw = priv->hw;
u16 wireless_mode;
@@ -6712,6 +6897,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
u8 txbw_40mhz;
u8 snr, snr_thresh_high, snr_thresh_low;
u8 go_up_gap = 5;
+ u8 macid = rtl8xxxu_get_macid(priv, sta);
rssi_level = priv->rssi_level;
snr = rtl8xxxu_signal_to_snr(signal);
@@ -6738,7 +6924,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
else
rssi_level = RTL8XXXU_RATR_STA_LOW;
- if (rssi_level != priv->rssi_level) {
+ if (rssi_level != priv->rssi_level || force) {
int sgi = 0;
u32 rate_bitmap = 0;
@@ -6831,7 +7017,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
}
priv->rssi_level = rssi_level;
- priv->fops->update_rate_mask(priv, rate_bitmap, ratr_idx, sgi, txbw_40mhz);
+ priv->fops->update_rate_mask(priv, rate_bitmap, ratr_idx, sgi, txbw_40mhz, macid);
}
}
@@ -6954,7 +7140,7 @@ static void rtl8xxxu_watchdog_callback(struct work_struct *work)
if (priv->fops->set_crystal_cap)
rtl8xxxu_track_cfo(priv);
- rtl8xxxu_refresh_rate_mask(priv, signal, sta);
+ rtl8xxxu_refresh_rate_mask(priv, signal, sta, false);
}
out:
@@ -7085,6 +7271,38 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
rtl8xxxu_free_tx_resources(priv);
}
+static int rtl8xxxu_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ struct rtl8xxxu_priv *priv = hw->priv;
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ sta_info->macid = rtl8xxxu_acquire_macid(priv);
+ if (sta_info->macid >= RTL8XXXU_MAX_MAC_ID_NUM)
+ return -ENOSPC;
+
+ rtl8xxxu_refresh_rate_mask(priv, 0, sta, true);
+ priv->fops->report_connect(priv, sta_info->macid, H2C_MACID_ROLE_STA, true);
+ }
+
+ return 0;
+}
+
+static int rtl8xxxu_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ struct rtl8xxxu_priv *priv = hw->priv;
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ rtl8xxxu_release_macid(priv, sta_info->macid);
+
+ return 0;
+}
+
static const struct ieee80211_ops rtl8xxxu_ops = {
.tx = rtl8xxxu_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
@@ -7093,6 +7311,7 @@ static const struct ieee80211_ops rtl8xxxu_ops = {
.config = rtl8xxxu_config,
.conf_tx = rtl8xxxu_conf_tx,
.bss_info_changed = rtl8xxxu_bss_info_changed,
+ .start_ap = rtl8xxxu_start_ap,
.configure_filter = rtl8xxxu_configure_filter,
.set_rts_threshold = rtl8xxxu_set_rts_threshold,
.start = rtl8xxxu_start,
@@ -7103,6 +7322,9 @@ static const struct ieee80211_ops rtl8xxxu_ops = {
.ampdu_action = rtl8xxxu_ampdu_action,
.sta_statistics = rtl8xxxu_sta_statistics,
.get_antenna = rtl8xxxu_get_antenna,
+ .set_tim = rtl8xxxu_set_tim,
+ .sta_add = rtl8xxxu_sta_add,
+ .sta_remove = rtl8xxxu_sta_remove,
};
static int rtl8xxxu_parse_usb(struct rtl8xxxu_priv *priv,
@@ -7294,6 +7516,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
spin_lock_init(&priv->rx_urb_lock);
INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work);
INIT_DELAYED_WORK(&priv->ra_watchdog, rtl8xxxu_watchdog_callback);
+ INIT_WORK(&priv->update_beacon_work, rtl8xxxu_update_beacon_work_callback);
skb_queue_head_init(&priv->c2hcmd_queue);
usb_set_intfdata(interface, hw);
@@ -7345,7 +7568,11 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
hw->wiphy->max_scan_ssids = 1;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+ if (priv->fops->max_macid_num)
+ hw->wiphy->max_ap_assoc_sta = priv->fops->max_macid_num - 1;
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ if (priv->fops->supports_ap)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
hw->queues = 4;
sband = &rtl8xxxu_supported_band;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index 4dffbab494c3..8571d5129f32 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -456,6 +456,7 @@
#define REG_FIFOPAGE 0x0204
#define REG_TDECTRL 0x0208
+#define BIT_BCN_VALID BIT(16)
#define REG_DWBCN0_CTRL_8188F REG_TDECTRL
@@ -470,6 +471,7 @@
#define AUTO_LLT_INIT_LLT BIT(16)
#define REG_DWBCN1_CTRL_8723B 0x0228
+#define BIT_SW_BCN_SEL BIT(20)
/* 0x0280 ~ 0x02FF RXDMA Configuration */
#define REG_RXDMA_AGG_PG_TH 0x0280 /* 0-7 : USB DMA size bits
@@ -516,6 +518,7 @@
#define REG_FWHW_TXQ_CTRL 0x0420
#define FWHW_TXQ_CTRL_AMPDU_RETRY BIT(7)
#define FWHW_TXQ_CTRL_XMIT_MGMT_ACK BIT(12)
+#define EN_BCNQ_DL BIT(22)
#define REG_HWSEQ_CTRL 0x0423
#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
@@ -572,6 +575,8 @@
#define REG_ARFR1 0x0448
#define REG_ARFR2 0x044c
#define REG_ARFR3 0x0450
+#define REG_CCK_CHECK 0x0454
+#define BIT_BCN_PORT_SEL BIT(5)
#define REG_AMPDU_MAX_TIME_8723B 0x0456
#define REG_AGGLEN_LMT 0x0458
#define REG_AMPDU_MIN_SPACE 0x045c
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index fa3d73b333ba..f8ba133baff0 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -183,8 +183,8 @@ static int rtw_debugfs_copy_from_user(char tmp[], int size,
tmp_len = (count > size - 1 ? size - 1 : count);
- if (!buffer || copy_from_user(tmp, buffer, tmp_len))
- return count;
+ if (copy_from_user(tmp, buffer, tmp_len))
+ return -EFAULT;
tmp[tmp_len] = '\0';
@@ -201,13 +201,16 @@ static ssize_t rtw_debugfs_set_read_reg(struct file *filp,
char tmp[32 + 1];
u32 addr, len;
int num;
+ int ret;
- rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 2);
+ ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 2);
+ if (ret)
+ return ret;
num = sscanf(tmp, "%x %x", &addr, &len);
if (num != 2)
- return count;
+ return -EINVAL;
if (len != 1 && len != 2 && len != 4) {
rtw_warn(rtwdev, "read reg setting wrong len\n");
@@ -288,8 +291,11 @@ static ssize_t rtw_debugfs_set_rsvd_page(struct file *filp,
char tmp[32 + 1];
u32 offset, page_num;
int num;
+ int ret;
- rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 2);
+ ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 2);
+ if (ret)
+ return ret;
num = sscanf(tmp, "%d %d", &offset, &page_num);
@@ -314,8 +320,11 @@ static ssize_t rtw_debugfs_set_single_input(struct file *filp,
char tmp[32 + 1];
u32 input;
int num;
+ int ret;
- rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ if (ret)
+ return ret;
num = kstrtoint(tmp, 0, &input);
@@ -338,14 +347,17 @@ static ssize_t rtw_debugfs_set_write_reg(struct file *filp,
char tmp[32 + 1];
u32 addr, val, len;
int num;
+ int ret;
- rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3);
+ ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3);
+ if (ret)
+ return ret;
/* write BB/MAC register */
num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
if (num != 3)
- return count;
+ return -EINVAL;
switch (len) {
case 1:
@@ -381,8 +393,11 @@ static ssize_t rtw_debugfs_set_h2c(struct file *filp,
char tmp[32 + 1];
u8 param[8];
int num;
+ int ret;
- rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3);
+ ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3);
+ if (ret)
+ return ret;
num = sscanf(tmp, "%hhx,%hhx,%hhx,%hhx,%hhx,%hhx,%hhx,%hhx",
&param[0], &param[1], &param[2], &param[3],
@@ -408,14 +423,17 @@ static ssize_t rtw_debugfs_set_rf_write(struct file *filp,
char tmp[32 + 1];
u32 path, addr, mask, val;
int num;
+ int ret;
- rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 4);
+ ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 4);
+ if (ret)
+ return ret;
num = sscanf(tmp, "%x %x %x %x", &path, &addr, &mask, &val);
if (num != 4) {
rtw_warn(rtwdev, "invalid args, [path] [addr] [mask] [val]\n");
- return count;
+ return -EINVAL;
}
mutex_lock(&rtwdev->mutex);
@@ -438,14 +456,17 @@ static ssize_t rtw_debugfs_set_rf_read(struct file *filp,
char tmp[32 + 1];
u32 path, addr, mask;
int num;
+ int ret;
- rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3);
+ ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3);
+ if (ret)
+ return ret;
num = sscanf(tmp, "%x %x %x", &path, &addr, &mask);
if (num != 3) {
rtw_warn(rtwdev, "invalid args, [path] [addr] [mask]\n");
- return count;
+ return -EINVAL;
}
debugfs_priv->rf_path = path;
@@ -467,7 +488,9 @@ static ssize_t rtw_debugfs_set_fix_rate(struct file *filp,
char tmp[32 + 1];
int ret;
- rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ if (ret)
+ return ret;
ret = kstrtou8(tmp, 0, &fix_rate);
if (ret) {
@@ -860,7 +883,9 @@ static ssize_t rtw_debugfs_set_coex_enable(struct file *filp,
bool enable;
int ret;
- rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ if (ret)
+ return ret;
ret = kstrtobool(tmp, &enable);
if (ret) {
@@ -930,7 +955,9 @@ static ssize_t rtw_debugfs_set_fw_crash(struct file *filp,
bool input;
int ret;
- rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ if (ret)
+ return ret;
ret = kstrtobool(tmp, &input);
if (ret)
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 7fc0a26a4d73..630124f9e025 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -1244,13 +1244,34 @@ static void rtw89_core_rx_process_phy_ppdu_iter(void *data,
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
struct rtw89_rx_phy_ppdu *phy_ppdu = (struct rtw89_rx_phy_ppdu *)data;
struct rtw89_dev *rtwdev = rtwsta->rtwdev;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 ant_num = hal->ant_diversity ? 2 : rtwdev->chip->rf_path_num;
+ u8 ant_pos = U8_MAX;
+ u8 evm_pos = 0;
int i;
- if (rtwsta->mac_id == phy_ppdu->mac_id && phy_ppdu->to_self) {
- ewma_rssi_add(&rtwsta->avg_rssi, phy_ppdu->rssi_avg);
+ if (rtwsta->mac_id != phy_ppdu->mac_id || !phy_ppdu->to_self)
+ return;
+
+ if (hal->ant_diversity && hal->antenna_rx) {
+ ant_pos = __ffs(hal->antenna_rx);
+ evm_pos = ant_pos;
+ }
+
+ ewma_rssi_add(&rtwsta->avg_rssi, phy_ppdu->rssi_avg);
+
+ if (ant_pos < ant_num) {
+ ewma_rssi_add(&rtwsta->rssi[ant_pos], phy_ppdu->rssi[0]);
+ } else {
for (i = 0; i < rtwdev->chip->rf_path_num; i++)
ewma_rssi_add(&rtwsta->rssi[i], phy_ppdu->rssi[i]);
}
+
+ if (phy_ppdu->ofdm.has) {
+ ewma_snr_add(&rtwsta->avg_snr, phy_ppdu->ofdm.avg_snr);
+ ewma_evm_add(&rtwsta->evm_min[evm_pos], phy_ppdu->ofdm.evm_min);
+ ewma_evm_add(&rtwsta->evm_max[evm_pos], phy_ppdu->ofdm.evm_max);
+ }
}
#define VAR_LEN 0xff
@@ -1277,20 +1298,30 @@ static u16 rtw89_core_get_phy_status_ie_len(struct rtw89_dev *rtwdev, u8 *addr)
static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev, u8 *addr,
struct rtw89_rx_phy_ppdu *phy_ppdu)
{
+ const struct rtw89_phy_sts_ie0 *ie = (const struct rtw89_phy_sts_ie0 *)addr;
s16 cfo;
+ u32 t;
- phy_ppdu->chan_idx = RTW89_GET_PHY_STS_IE01_CH_IDX(addr);
+ phy_ppdu->chan_idx = le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_CH_IDX);
if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6)
return;
if (!phy_ppdu->to_self)
return;
+ phy_ppdu->ofdm.avg_snr = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_AVG_SNR);
+ phy_ppdu->ofdm.evm_max = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MAX);
+ phy_ppdu->ofdm.evm_min = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MIN);
+ phy_ppdu->ofdm.has = true;
+
/* sign conversion for S(12,2) */
- if (rtwdev->chip->cfo_src_fd)
- cfo = sign_extend32(RTW89_GET_PHY_STS_IE01_FD_CFO(addr), 11);
- else
- cfo = sign_extend32(RTW89_GET_PHY_STS_IE01_PREMB_CFO(addr), 11);
+ if (rtwdev->chip->cfo_src_fd) {
+ t = le32_get_bits(ie->w1, RTW89_PHY_STS_IE01_W1_FD_CFO);
+ cfo = sign_extend32(t, 11);
+ } else {
+ t = le32_get_bits(ie->w1, RTW89_PHY_STS_IE01_W1_PREMB_CFO);
+ cfo = sign_extend32(t, 11);
+ }
rtw89_phy_cfo_parse(rtwdev, cfo, phy_ppdu);
}
@@ -1333,9 +1364,6 @@ static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev,
return -EINVAL;
}
rtw89_core_update_phy_ppdu(phy_ppdu);
- ieee80211_iterate_stations_atomic(rtwdev->hw,
- rtw89_core_rx_process_phy_ppdu_iter,
- phy_ppdu);
return 0;
}
@@ -1363,6 +1391,8 @@ static int rtw89_core_rx_parse_phy_sts(struct rtw89_dev *rtwdev,
}
}
+ rtw89_phy_antdiv_parse(rtwdev, phy_ppdu);
+
return 0;
}
@@ -1376,6 +1406,10 @@ static void rtw89_core_rx_process_phy_sts(struct rtw89_dev *rtwdev,
rtw89_debug(rtwdev, RTW89_DBG_TXRX, "parse phy sts failed\n");
else
phy_ppdu->valid = true;
+
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_core_rx_process_phy_ppdu_iter,
+ phy_ppdu);
}
static u8 rtw89_rxdesc_to_nl_he_gi(struct rtw89_dev *rtwdev,
@@ -1481,6 +1515,34 @@ static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev,
}
}
+static void rtw89_cancel_6ghz_probe_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ cancel_6ghz_probe_work);
+ struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
+ struct rtw89_pktofld_info *info;
+
+ mutex_lock(&rtwdev->mutex);
+
+ if (!rtwdev->scanning)
+ goto out;
+
+ list_for_each_entry(info, &pkt_list[NL80211_BAND_6GHZ], list) {
+ if (!info->cancel || !test_bit(info->id, rtwdev->pkt_offload))
+ continue;
+
+ rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
+
+ /* Don't delete/free info from pkt_list at this moment. Let it
+ * be deleted/freed in rtw89_release_pkt_list() after scanning,
+ * since pkt_list is accessed in the bottom half during scanning.
+ */
+ }
+
+out:
+ mutex_unlock(&rtwdev->mutex);
+}
+
static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
struct sk_buff *skb)
{
@@ -1489,6 +1551,7 @@ static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
struct rtw89_pktofld_info *info;
const u8 *ies = mgmt->u.beacon.variable, *ssid_ie;
+ bool queue_work = false;
if (rx_status->band != NL80211_BAND_6GHZ)
return;
@@ -1497,16 +1560,22 @@ static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
list_for_each_entry(info, &pkt_list[NL80211_BAND_6GHZ], list) {
if (ether_addr_equal(info->bssid, mgmt->bssid)) {
- rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
+ info->cancel = true;
+ queue_work = true;
continue;
}
if (!ssid_ie || ssid_ie[1] != info->ssid_len || info->ssid_len == 0)
continue;
- if (memcmp(&ssid_ie[2], info->ssid, info->ssid_len) == 0)
- rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
+ if (memcmp(&ssid_ie[2], info->ssid, info->ssid_len) == 0) {
+ info->cancel = true;
+ queue_work = true;
+ }
}
+
+ if (queue_work)
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->cancel_6ghz_probe_work);
}
static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
@@ -2596,6 +2665,7 @@ static void rtw89_track_work(struct work_struct *work)
rtw89_phy_ra_update(rtwdev);
rtw89_phy_cfo_track(rtwdev);
rtw89_phy_tx_path_div_track(rtwdev);
+ rtw89_phy_antdiv_track(rtwdev);
rtw89_phy_ul_tb_ctrl_track(rtwdev);
if (rtwdev->lps_enabled && !rtwdev->btc.lps)
@@ -2759,6 +2829,8 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
{
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 ant_num = hal->ant_diversity ? 2 : rtwdev->chip->rf_path_num;
int i;
int ret;
@@ -2772,8 +2844,12 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
rtw89_core_txq_init(rtwdev, sta->txq[i]);
ewma_rssi_init(&rtwsta->avg_rssi);
- for (i = 0; i < rtwdev->chip->rf_path_num; i++)
+ ewma_snr_init(&rtwsta->avg_snr);
+ for (i = 0; i < ant_num; i++) {
ewma_rssi_init(&rtwsta->rssi[i]);
+ ewma_evm_init(&rtwsta->evm_min[i]);
+ ewma_evm_init(&rtwsta->evm_max[i]);
+ }
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
/* for station mode, assign the mac_id from itself */
@@ -3433,6 +3509,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
mutex_unlock(&rtwdev->mutex);
cancel_work_sync(&rtwdev->c2h_work);
+ cancel_work_sync(&rtwdev->cancel_6ghz_probe_work);
cancel_work_sync(&btc->eapol_notify_work);
cancel_work_sync(&btc->arp_notify_work);
cancel_work_sync(&btc->dhcp_notify_work);
@@ -3444,6 +3521,7 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
cancel_delayed_work_sync(&rtwdev->coex_rfk_chk_work);
cancel_delayed_work_sync(&rtwdev->cfo_track_work);
cancel_delayed_work_sync(&rtwdev->forbid_ba_work);
+ cancel_delayed_work_sync(&rtwdev->antdiv_work);
mutex_lock(&rtwdev->mutex);
@@ -3479,6 +3557,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_DELAYED_WORK(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
+ INIT_DELAYED_WORK(&rtwdev->antdiv_work, rtw89_phy_antdiv_work);
rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
if (!rtwdev->txq_wq)
return -ENOMEM;
@@ -3489,10 +3568,12 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtwdev->total_sta_assoc = 0;
rtw89_init_wait(&rtwdev->mcc.wait);
+ rtw89_init_wait(&rtwdev->mac.fw_ofld_wait);
INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work);
INIT_WORK(&rtwdev->ips_work, rtw89_ips_work);
INIT_WORK(&rtwdev->load_firmware_work, rtw89_load_firmware_work);
+ INIT_WORK(&rtwdev->cancel_6ghz_probe_work, rtw89_cancel_6ghz_probe_work);
skb_queue_head_init(&rtwdev->c2h_queue);
rtw89_core_ppdu_sts_init(rtwdev);
@@ -3587,7 +3668,7 @@ static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
if (chip->chip_id == RTL8852B || chip->chip_id == RTL8851B) {
ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_CV, &val);
- if (!ret)
+ if (ret)
return;
rtwdev->hal.acv = u8_get_bits(val, XTAL_SI_ACV_MASK);
@@ -3696,6 +3777,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
{
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw89_efuse *efuse = &rtwdev->efuse;
+ struct rtw89_hal *hal = &rtwdev->hal;
int ret;
int tx_headroom = IEEE80211_HT_CTL_LEN;
@@ -3734,8 +3816,13 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO);
- hw->wiphy->available_antennas_tx = BIT(rtwdev->chip->rf_path_num) - 1;
- hw->wiphy->available_antennas_rx = BIT(rtwdev->chip->rf_path_num) - 1;
+ if (hal->ant_diversity) {
+ hw->wiphy->available_antennas_tx = 0x3;
+ hw->wiphy->available_antennas_rx = 0x3;
+ } else {
+ hw->wiphy->available_antennas_tx = BIT(rtwdev->chip->rf_path_num) - 1;
+ hw->wiphy->available_antennas_rx = BIT(rtwdev->chip->rf_path_num) - 1;
+ }
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_TDLS_EXTERNAL_SETUP |
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 6df386a38fb4..f697902706ac 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -122,6 +122,13 @@ enum rtw89_cv {
CHIP_CV_INVALID = CHIP_CV_MAX,
};
+enum rtw89_bacam_ver {
+ RTW89_BACAM_V0,
+ RTW89_BACAM_V1,
+
+ RTW89_BACAM_V0_EXT = 99,
+};
+
enum rtw89_core_tx_type {
RTW89_CORE_TX_TYPE_DATA,
RTW89_CORE_TX_TYPE_MGMT,
@@ -551,6 +558,12 @@ struct rtw89_rx_phy_ppdu {
u8 chan_idx;
u8 ie;
u16 rate;
+ struct {
+ bool has;
+ u8 avg_snr;
+ u8 evm_max;
+ u8 evm_min;
+ } ofdm;
bool to_self;
bool valid;
};
@@ -2533,6 +2546,8 @@ struct rtw89_ra_report {
};
DECLARE_EWMA(rssi, 10, 16);
+DECLARE_EWMA(evm, 10, 16);
+DECLARE_EWMA(snr, 10, 16);
struct rtw89_ba_cam_entry {
struct list_head list;
@@ -2595,6 +2610,9 @@ struct rtw89_sta {
u8 prev_rssi;
struct ewma_rssi avg_rssi;
struct ewma_rssi rssi[RF_PATH_MAX];
+ struct ewma_snr avg_snr;
+ struct ewma_evm evm_min[RF_PATH_MAX];
+ struct ewma_evm evm_max[RF_PATH_MAX];
struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
struct ieee80211_rx_status rx_status;
u16 rx_hw_rate;
@@ -3090,6 +3108,12 @@ struct rtw89_imr_info {
u32 tmac_imr_set;
};
+struct rtw89_xtal_info {
+ u32 xcap_reg;
+ u32 sc_xo_mask;
+ u32 sc_xi_mask;
+};
+
struct rtw89_rrsr_cfgs {
struct rtw89_reg3_def ref_rate;
struct rtw89_reg3_def rsc;
@@ -3116,6 +3140,25 @@ struct rtw89_phy_ul_tb_info {
u8 def_if_bandedge;
};
+struct rtw89_antdiv_stats {
+ struct ewma_rssi cck_rssi_avg;
+ struct ewma_rssi ofdm_rssi_avg;
+ struct ewma_rssi non_legacy_rssi_avg;
+ u16 pkt_cnt_cck;
+ u16 pkt_cnt_ofdm;
+ u16 pkt_cnt_non_legacy;
+ u32 evm;
+};
+
+struct rtw89_antdiv_info {
+ struct rtw89_antdiv_stats target_stats;
+ struct rtw89_antdiv_stats main_stats;
+ struct rtw89_antdiv_stats aux_stats;
+ u8 training_count;
+ u8 rssi_pre;
+ bool get_stats;
+};
+
struct rtw89_chip_info {
enum rtw89_core_chip_id chip_id;
const struct rtw89_chip_ops *ops;
@@ -3123,6 +3166,7 @@ struct rtw89_chip_info {
u8 fw_format_max;
bool try_ce_fw;
u32 fifo_size;
+ bool small_fifo_size;
u32 dle_scc_rsvd_size;
u16 max_amsdu_limit;
bool dis_2g_40m_ul_ofdma;
@@ -3145,7 +3189,7 @@ struct rtw89_chip_info {
u8 scam_num;
u8 bacam_num;
u8 bacam_dynamic_num;
- bool bacam_v1;
+ enum rtw89_bacam_ver bacam_ver;
u8 sec_ctrl_efuse_size;
u32 physical_efuse_size;
@@ -3162,6 +3206,7 @@ struct rtw89_chip_info {
const struct rtw89_phy_table *bb_gain_table;
const struct rtw89_phy_table *rf_table[RF_PATH_MAX];
const struct rtw89_phy_table *nctl_table;
+ const struct rtw89_rfk_tbl *nctl_post_table;
const struct rtw89_txpwr_table *byr_table;
const struct rtw89_phy_dig_gain_table *dig_table;
const struct rtw89_dig_regs *dig_regs;
@@ -3215,6 +3260,7 @@ struct rtw89_chip_info {
u32 dma_ch_mask;
u32 edcca_lvl_reg;
const struct wiphy_wowlan_support *wowlan_stub;
+ const struct rtw89_xtal_info *xtal_info;
};
union rtw89_bus_info {
@@ -3248,14 +3294,6 @@ enum rtw89_host_rpr_mode {
RTW89_RPR_MODE_STF
};
-struct rtw89_mac_info {
- struct rtw89_dle_info dle_info;
- struct rtw89_hfc_param hfc_param;
- enum rtw89_qta_mode qta_mode;
- u8 rpwm_seq_num;
- u8 cpwm_seq_num;
-};
-
#define RTW89_COMPLETION_BUF_SIZE 24
#define RTW89_WAIT_COND_IDLE UINT_MAX
@@ -3278,6 +3316,17 @@ static inline void rtw89_init_wait(struct rtw89_wait_info *wait)
atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
}
+struct rtw89_mac_info {
+ struct rtw89_dle_info dle_info;
+ struct rtw89_hfc_param hfc_param;
+ enum rtw89_qta_mode qta_mode;
+ u8 rpwm_seq_num;
+ u8 cpwm_seq_num;
+
+ /* see RTW89_FW_OFLD_WAIT_COND series for wait condition */
+ struct rtw89_wait_info fw_ofld_wait;
+};
+
enum rtw89_fw_type {
RTW89_FW_NORMAL = 1,
RTW89_FW_WOWLAN = 3,
@@ -3423,6 +3472,8 @@ struct rtw89_hal {
u8 tx_nss;
u8 rx_nss;
bool tx_path_diversity;
+ bool ant_diversity;
+ bool ant_diversity_fixed;
bool support_cckpd;
bool support_igi;
atomic_t roc_entity_idx;
@@ -4054,6 +4105,7 @@ struct rtw89_dev {
struct work_struct c2h_work;
struct work_struct ips_work;
struct work_struct load_firmware_work;
+ struct work_struct cancel_6ghz_probe_work;
struct list_head early_h2c_list;
@@ -4086,6 +4138,7 @@ struct rtw89_dev {
struct rtw89_phy_bb_gain_info bb_gain;
struct rtw89_phy_efuse_gain efuse_gain;
struct rtw89_phy_ul_tb_info ul_tb_info;
+ struct rtw89_antdiv_info antdiv;
struct delayed_work track_work;
struct delayed_work coex_act1_work;
@@ -4094,6 +4147,7 @@ struct rtw89_dev {
struct delayed_work cfo_track_work;
struct delayed_work forbid_ba_work;
struct delayed_work roc_work;
+ struct delayed_work antdiv_work;
struct rtw89_ppdu_sts_info ppdu_sts;
u8 total_sta_assoc;
bool scanning;
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 1e5b7a998716..6f418f14ec3f 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -3206,7 +3206,11 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
struct seq_file *m = (struct seq_file *)data;
struct rtw89_dev *rtwdev = rtwsta->rtwdev;
struct rtw89_hal *hal = &rtwdev->hal;
+ u8 ant_num = hal->ant_diversity ? 2 : rtwdev->chip->rf_path_num;
+ bool ant_asterisk = hal->tx_path_diversity || hal->ant_diversity;
+ u8 evm_min, evm_max;
u8 rssi;
+ u8 snr;
int i;
seq_printf(m, "TX rate [%d]: ", rtwsta->mac_id);
@@ -3256,13 +3260,27 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
rssi = ewma_rssi_read(&rtwsta->avg_rssi);
seq_printf(m, "RSSI: %d dBm (raw=%d, prev=%d) [",
RTW89_RSSI_RAW_TO_DBM(rssi), rssi, rtwsta->prev_rssi);
- for (i = 0; i < rtwdev->chip->rf_path_num; i++) {
+ for (i = 0; i < ant_num; i++) {
rssi = ewma_rssi_read(&rtwsta->rssi[i]);
seq_printf(m, "%d%s%s", RTW89_RSSI_RAW_TO_DBM(rssi),
- hal->tx_path_diversity && (hal->antenna_tx & BIT(i)) ? "*" : "",
- i + 1 == rtwdev->chip->rf_path_num ? "" : ", ");
+ ant_asterisk && (hal->antenna_tx & BIT(i)) ? "*" : "",
+ i + 1 == ant_num ? "" : ", ");
}
seq_puts(m, "]\n");
+
+ seq_puts(m, "EVM: [");
+ for (i = 0; i < (hal->ant_diversity ? 2 : 1); i++) {
+ evm_min = ewma_evm_read(&rtwsta->evm_min[i]);
+ evm_max = ewma_evm_read(&rtwsta->evm_max[i]);
+
+ seq_printf(m, "%s(%2u.%02u, %2u.%02u)", i == 0 ? "" : " ",
+ evm_min >> 2, (evm_min & 0x3) * 25,
+ evm_max >> 2, (evm_max & 0x3) * 25);
+ }
+ seq_puts(m, "]\t");
+
+ snr = ewma_snr_read(&rtwsta->avg_snr);
+ seq_printf(m, "SNR: %u\n", snr);
}
static void
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index b9b675bf9d05..ad277f22b197 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -14,6 +14,8 @@
static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
struct sk_buff *skb);
+static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ struct rtw89_wait_info *wait, unsigned int cond);
static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
bool header)
@@ -807,7 +809,7 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
}
skb_put(skb, H2C_BA_CAM_LEN);
SET_BA_CAM_MACID(skb->data, macid);
- if (chip->bacam_v1)
+ if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
else
SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
@@ -823,7 +825,7 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
SET_BA_CAM_INIT_REQ(skb->data, 1);
SET_BA_CAM_SSN(skb->data, params->ssn);
- if (chip->bacam_v1) {
+ if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
SET_BA_CAM_STD_EN(skb->data, 1);
SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
}
@@ -848,8 +850,8 @@ fail:
return ret;
}
-static int rtw89_fw_h2c_init_dynamic_ba_cam_v1(struct rtw89_dev *rtwdev,
- u8 entry_idx, u8 uid)
+static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
+ u8 entry_idx, u8 uid)
{
struct sk_buff *skb;
int ret;
@@ -886,7 +888,7 @@ fail:
return ret;
}
-void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev)
+void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
u8 entry_idx = chip->bacam_num;
@@ -894,7 +896,7 @@ void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev)
int i;
for (i = 0; i < chip->bacam_dynamic_num; i++) {
- rtw89_fw_h2c_init_dynamic_ba_cam_v1(rtwdev, entry_idx, uid);
+ rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid);
entry_idx++;
uid++;
}
@@ -997,8 +999,8 @@ void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
list_for_each_entry_safe(info, tmp, pkt_list, list) {
if (notify_fw)
rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
- rtw89_core_release_bit_map(rtwdev->pkt_offload,
- info->id);
+ else
+ rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id);
list_del(&info->list);
kfree(info);
}
@@ -2440,7 +2442,9 @@ fail:
#define H2C_LEN_PKT_OFLD 4
int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
{
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
struct sk_buff *skb;
+ unsigned int cond;
u8 *cmd;
int ret;
@@ -2460,23 +2464,26 @@ int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
H2C_FUNC_PACKET_OFLD, 1, 1,
H2C_LEN_PKT_OFLD);
- ret = rtw89_h2c_tx(rtwdev, skb, false);
+ cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL);
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
if (ret) {
- rtw89_err(rtwdev, "failed to send h2c\n");
- goto fail;
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "failed to del pkt ofld: id %d, ret %d\n",
+ id, ret);
+ return ret;
}
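+	/* Release the id only after the firmware has acknowledged the delete
+	 * via the packet-offload response C2H.
+	 */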
+ rtw89_core_release_bit_map(rtwdev->pkt_offload, id);
return 0;
-fail:
- dev_kfree_skb_any(skb);
-
- return ret;
}
int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
struct sk_buff *skb_ofld)
{
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
struct sk_buff *skb;
+ unsigned int cond;
u8 *cmd;
u8 alloc_id;
int ret;
@@ -2507,27 +2514,29 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
H2C_FUNC_PACKET_OFLD, 1, 1,
H2C_LEN_PKT_OFLD + skb_ofld->len);
- ret = rtw89_h2c_tx(rtwdev, skb, false);
+ cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD);
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
if (ret) {
- rtw89_err(rtwdev, "failed to send h2c\n");
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "failed to add pkt ofld: id %d, ret %d\n",
+ alloc_id, ret);
rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
- goto fail;
+ return ret;
}
return 0;
-fail:
- dev_kfree_skb_any(skb);
-
- return ret;
}
#define H2C_LEN_SCAN_LIST_OFFLOAD 4
int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
struct list_head *chan_list)
{
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
struct rtw89_mac_chinfo *ch_info;
struct sk_buff *skb;
int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
+ unsigned int cond;
u8 *cmd;
int ret;
@@ -2574,27 +2583,27 @@ int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
- ret = rtw89_h2c_tx(rtwdev, skb, false);
+ cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH);
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
if (ret) {
- rtw89_err(rtwdev, "failed to send h2c\n");
- goto fail;
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
+ return ret;
}
return 0;
-fail:
- dev_kfree_skb_any(skb);
-
- return ret;
}
int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
struct rtw89_scan_option *option,
struct rtw89_vif *rtwvif)
{
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
struct rtw89_h2c_scanofld *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
+ unsigned int cond;
int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
@@ -2633,17 +2642,15 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
H2C_FUNC_SCANOFLD, 1, 1,
len);
- ret = rtw89_h2c_tx(rtwdev, skb, false);
+ cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD);
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
if (ret) {
- rtw89_err(rtwdev, "failed to send h2c\n");
- goto fail;
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n");
+ return ret;
}
return 0;
-fail:
- dev_kfree_skb_any(skb);
-
- return ret;
}
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
@@ -3019,9 +3026,8 @@ static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
continue;
list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
- rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
- rtw89_core_release_bit_map(rtwdev->pkt_offload,
- info->id);
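+			/* rtw89_fw_h2c_del_pkt_offload() now waits for the
+			 * firmware ack and releases the id itself, so only
+			 * issue it for ids still marked allocated.
+			 */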
+ if (test_bit(info->id, rtwdev->pkt_offload))
+ rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
list_del(&info->list);
kfree(info);
}
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 675f85c41471..048283750a2d 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -138,8 +138,13 @@ enum rtw89_pkt_offload_op {
RTW89_PKT_OFLD_OP_ADD,
RTW89_PKT_OFLD_OP_DEL,
RTW89_PKT_OFLD_OP_READ,
+
+ NUM_OF_RTW89_PKT_OFFLOAD_OP,
};
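+
+/* Fold (pkt_id, pkt_op) into a single wait tag so each outstanding
+ * packet-offload operation has its own completion slot:
+ * tag = pkt_id * NUM_OF_RTW89_PKT_OFFLOAD_OP + pkt_op.
+ */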
+#define RTW89_PKT_OFLD_WAIT_TAG(pkt_id, pkt_op) \
+ ((pkt_id) * NUM_OF_RTW89_PKT_OFFLOAD_OP + (pkt_op))
+
enum rtw89_scanofld_notify_reason {
RTW89_SCAN_DWELL_NOTIFY,
RTW89_SCAN_PRE_TX_NOTIFY,
@@ -277,6 +282,7 @@ struct rtw89_pktofld_info {
u8 ssid_len;
u8 bssid[ETH_ALEN];
u16 channel_6ghz;
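+	/* presumably marks a queued 6 GHz probe entry for removal; consumed
+	 * by the cancel_6ghz_probe_work added in this series
+	 */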
+ bool cancel;
};
static inline void RTW89_SET_FWCMD_RA_IS_DIS(void *cmd, u32 val)
@@ -3215,16 +3221,17 @@ static inline struct rtw89_fw_c2h_attr *RTW89_SKB_C2H_CB(struct sk_buff *skb)
#define RTW89_GET_C2H_LOG_SRT_PRT(c2h) (char *)((__le32 *)(c2h) + 2)
#define RTW89_GET_C2H_LOG_LEN(len) ((len) - RTW89_C2H_HEADER_LEN)
-#define RTW89_GET_MAC_C2H_DONE_ACK_CAT(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
-#define RTW89_GET_MAC_C2H_DONE_ACK_CLASS(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 2))
-#define RTW89_GET_MAC_C2H_DONE_ACK_FUNC(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8))
-#define RTW89_GET_MAC_C2H_DONE_ACK_H2C_RETURN(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 16))
-#define RTW89_GET_MAC_C2H_DONE_ACK_H2C_SEQ(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 24))
+struct rtw89_c2h_done_ack {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+} __packed;
+
+#define RTW89_C2H_DONE_ACK_W2_CAT GENMASK(1, 0)
+#define RTW89_C2H_DONE_ACK_W2_CLASS GENMASK(7, 2)
+#define RTW89_C2H_DONE_ACK_W2_FUNC GENMASK(15, 8)
+#define RTW89_C2H_DONE_ACK_W2_H2C_RETURN GENMASK(23, 16)
+#define RTW89_C2H_DONE_ACK_W2_H2C_SEQ GENMASK(31, 24)
#define RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
@@ -3339,6 +3346,16 @@ static_assert(sizeof(struct rtw89_mac_mcc_tsf_rpt) <= RTW89_COMPLETION_BUF_SIZE)
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_HIGH(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
+struct rtw89_c2h_pkt_ofld_rsp {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+} __packed;
+
+#define RTW89_C2H_PKT_OFLD_RSP_W2_PTK_ID GENMASK(7, 0)
+#define RTW89_C2H_PKT_OFLD_RSP_W2_PTK_OP GENMASK(10, 8)
+#define RTW89_C2H_PKT_OFLD_RSP_W2_PTK_LEN GENMASK(31, 16)
+
struct rtw89_h2c_bcnfltr {
__le32 w0;
} __packed;
@@ -3497,17 +3514,28 @@ struct rtw89_fw_h2c_rf_reg_info {
/* CLASS 9 - FW offload */
#define H2C_CL_MAC_FW_OFLD 0x9
-#define H2C_FUNC_PACKET_OFLD 0x1
-#define H2C_FUNC_MAC_MACID_PAUSE 0x8
-#define H2C_FUNC_USR_EDCA 0xF
-#define H2C_FUNC_TSF32_TOGL 0x10
-#define H2C_FUNC_OFLD_CFG 0x14
-#define H2C_FUNC_ADD_SCANOFLD_CH 0x16
-#define H2C_FUNC_SCANOFLD 0x17
-#define H2C_FUNC_PKT_DROP 0x1b
-#define H2C_FUNC_CFG_BCNFLTR 0x1e
-#define H2C_FUNC_OFLD_RSSI 0x1f
-#define H2C_FUNC_OFLD_TP 0x20
+enum rtw89_fw_ofld_h2c_func {
+ H2C_FUNC_PACKET_OFLD = 0x1,
+ H2C_FUNC_MAC_MACID_PAUSE = 0x8,
+ H2C_FUNC_USR_EDCA = 0xF,
+ H2C_FUNC_TSF32_TOGL = 0x10,
+ H2C_FUNC_OFLD_CFG = 0x14,
+ H2C_FUNC_ADD_SCANOFLD_CH = 0x16,
+ H2C_FUNC_SCANOFLD = 0x17,
+ H2C_FUNC_PKT_DROP = 0x1b,
+ H2C_FUNC_CFG_BCNFLTR = 0x1e,
+ H2C_FUNC_OFLD_RSSI = 0x1f,
+ H2C_FUNC_OFLD_TP = 0x20,
+
+ NUM_OF_RTW89_FW_OFLD_H2C_FUNC,
+};
+
+#define RTW89_FW_OFLD_WAIT_COND(tag, func) \
+ ((tag) * NUM_OF_RTW89_FW_OFLD_H2C_FUNC + (func))
+
+#define RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(pkt_id, pkt_op) \
+ RTW89_FW_OFLD_WAIT_COND(RTW89_PKT_OFLD_WAIT_TAG(pkt_id, pkt_op), \
+ H2C_FUNC_PACKET_OFLD)
/* CLASS 10 - Security CAM */
#define H2C_CL_MAC_SEC_CAM 0xa
@@ -3648,7 +3676,7 @@ void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw);
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params);
-void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev);
+void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
@@ -3711,8 +3739,8 @@ static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- if (chip->bacam_v1)
- rtw89_fw_h2c_init_ba_cam_v1(rtwdev);
+ if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
+ rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(rtwdev);
}
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index b8019cfc11b2..64dc36470840 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -1475,6 +1475,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_qt_52a_wow = {264, 0, 32, 20, 64, 13, 1005, 0, 64, 128, 120,},
/* 8852B PCIE WOW */
.ple_qt_52b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
+ /* 8851B PCIE WOW */
+ .ple_qt_51b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
};
EXPORT_SYMBOL(rtw89_mac_size);
@@ -2598,6 +2600,7 @@ static int rtw89_mac_read_phycap(struct rtw89_dev *rtwdev,
int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
struct rtw89_hal *hal = &rtwdev->hal;
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_mac_c2h_info c2h_info = {0};
@@ -2629,6 +2632,13 @@ int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
hal->tx_path_diversity = true;
}
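+	/* Single-RF-path chips drive both physical antennas from path A;
+	 * boards whose (rfe_type % 3) == 2 are assumed to be wired for
+	 * two-antenna diversity.
+	 */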
+ if (chip->rf_path_num == 1) {
+ hal->antenna_tx = RF_A;
+ hal->antenna_rx = RF_A;
+ if ((efuse->rfe_type % 3) == 2)
+ hal->ant_diversity = true;
+ }
+
rtw89_debug(rtwdev, RTW89_DBG_FW,
"phycap hal/phy/chip: tx_nss=0x%x/0x%x/0x%x rx_nss=0x%x/0x%x/0x%x\n",
hal->tx_nss, tx_nss, chip->tx_nss,
@@ -2637,6 +2647,7 @@ int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
"ant num/bitmap: tx=%d/0x%x rx=%d/0x%x\n",
tx_ant, hal->antenna_tx, rx_ant, hal->antenna_rx);
rtw89_debug(rtwdev, RTW89_DBG_FW, "TX path diversity=%d\n", hal->tx_path_diversity);
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "Antenna diversity=%d\n", hal->ant_diversity);
return 0;
}
@@ -4327,6 +4338,8 @@ rtw89_mac_c2h_bcn_fltr_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
static void
rtw89_mac_c2h_rec_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
+ /* N.B. This will run in interrupt context. */
+
rtw89_debug(rtwdev, RTW89_DBG_FW,
"C2H rev ack recv, cat: %d, class: %d, func: %d, seq : %d\n",
RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h->data),
@@ -4336,15 +4349,44 @@ rtw89_mac_c2h_rec_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
}
static void
-rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
-{
+rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 len)
+{
+ /* N.B. This will run in interrupt context. */
+ struct rtw89_wait_info *fw_ofld_wait = &rtwdev->mac.fw_ofld_wait;
+ const struct rtw89_c2h_done_ack *c2h =
+ (const struct rtw89_c2h_done_ack *)skb_c2h->data;
+ u8 h2c_cat = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_CAT);
+ u8 h2c_class = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_CLASS);
+ u8 h2c_func = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_FUNC);
+ u8 h2c_return = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_H2C_RETURN);
+ u8 h2c_seq = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_H2C_SEQ);
+ struct rtw89_completion_data data = {};
+ unsigned int cond;
+
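+	/* Decode the done-ack and complete the waiter, if any, that
+	 * rtw89_h2c_tx_and_wait() parked on this FW offload command.
+	 */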
rtw89_debug(rtwdev, RTW89_DBG_FW,
"C2H done ack recv, cat: %d, class: %d, func: %d, ret: %d, seq : %d\n",
- RTW89_GET_MAC_C2H_DONE_ACK_CAT(c2h->data),
- RTW89_GET_MAC_C2H_DONE_ACK_CLASS(c2h->data),
- RTW89_GET_MAC_C2H_DONE_ACK_FUNC(c2h->data),
- RTW89_GET_MAC_C2H_DONE_ACK_H2C_RETURN(c2h->data),
- RTW89_GET_MAC_C2H_DONE_ACK_H2C_SEQ(c2h->data));
+ h2c_cat, h2c_class, h2c_func, h2c_return, h2c_seq);
+
+ if (h2c_cat != H2C_CAT_MAC)
+ return;
+
+ switch (h2c_class) {
+ default:
+ return;
+ case H2C_CL_MAC_FW_OFLD:
+ switch (h2c_func) {
+ default:
+ return;
+ case H2C_FUNC_ADD_SCANOFLD_CH:
+ case H2C_FUNC_SCANOFLD:
+ cond = RTW89_FW_OFLD_WAIT_COND(0, h2c_func);
+ break;
+ }
+
+ data.err = !!h2c_return;
+ rtw89_complete_cond(fw_ofld_wait, cond, &data);
+ return;
+ }
}
static void
@@ -4360,9 +4402,22 @@ rtw89_mac_c2h_bcn_cnt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
}
static void
-rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h,
u32 len)
{
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ const struct rtw89_c2h_pkt_ofld_rsp *c2h =
+ (const struct rtw89_c2h_pkt_ofld_rsp *)skb_c2h->data;
+ u16 pkt_len = le32_get_bits(c2h->w2, RTW89_C2H_PKT_OFLD_RSP_W2_PTK_LEN);
+ u8 pkt_id = le32_get_bits(c2h->w2, RTW89_C2H_PKT_OFLD_RSP_W2_PTK_ID);
+ u8 pkt_op = le32_get_bits(c2h->w2, RTW89_C2H_PKT_OFLD_RSP_W2_PTK_OP);
+ struct rtw89_completion_data data = {};
+ unsigned int cond;
+
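+	/* The response reports success via a non-zero packet length; wake the
+	 * add/del waiter keyed by (pkt_id, pkt_op).
+	 */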
+ data.err = !pkt_len;
+ cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(pkt_id, pkt_op);
+
+ rtw89_complete_cond(wait, cond, &data);
}
static void
@@ -4570,6 +4625,21 @@ bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
switch (class) {
default:
return false;
+ case RTW89_MAC_C2H_CLASS_INFO:
+ switch (func) {
+ default:
+ return false;
+ case RTW89_MAC_C2H_FUNC_REC_ACK:
+ case RTW89_MAC_C2H_FUNC_DONE_ACK:
+ return true;
+ }
+ case RTW89_MAC_C2H_CLASS_OFLD:
+ switch (func) {
+ default:
+ return false;
+ case RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP:
+ return true;
+ }
case RTW89_MAC_C2H_CLASS_MCC:
return true;
}
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index a8d9847ef0b4..d3922d4fe288 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -817,6 +817,7 @@ struct rtw89_mac_size_set {
const struct rtw89_ple_quota ple_qt58;
const struct rtw89_ple_quota ple_qt_52a_wow;
const struct rtw89_ple_quota ple_qt_52b_wow;
+ const struct rtw89_ple_quota ple_qt_51b_wow;
};
extern const struct rtw89_mac_size_set rtw89_mac_size;
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index ee4588b61b8f..f40d70f016e4 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -762,13 +762,18 @@ int rtw89_ops_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_hal *hal = &rtwdev->hal;
- if (rx_ant != hw->wiphy->available_antennas_rx && rx_ant != hal->antenna_rx)
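+	/* With hardware antenna diversity the single RF path is switched
+	 * between two antennas, so userspace must pick exactly one antenna
+	 * and the same one for both TX and RX; doing so pins the selection.
+	 */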
+ if (hal->ant_diversity) {
+ if (tx_ant != rx_ant || hweight32(tx_ant) != 1)
+ return -EINVAL;
+ } else if (rx_ant != hw->wiphy->available_antennas_rx && rx_ant != hal->antenna_rx) {
return -EINVAL;
+ }
mutex_lock(&rtwdev->mutex);
hal->antenna_tx = tx_ant;
hal->antenna_rx = rx_ant;
hal->tx_path_diversity = false;
+ hal->ant_diversity_fixed = true;
mutex_unlock(&rtwdev->mutex);
return 0;
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 70b4754667c9..b53f346fef97 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -1003,10 +1003,10 @@ static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
min_cnt = min(bd_cnt, wd_cnt);
if (min_cnt == 0) {
/* This message can be frequently shown in low power mode or
- * high traffic with 8852B, and we have recognized it as normal
+ * high traffic with small FIFO chips, and we have recognized it as normal
* behavior, so print with mask RTW89_DBG_TXRX in these situations.
*/
- if (rtwpci->low_power || chip->chip_id == RTL8852B)
+ if (rtwpci->low_power || chip->small_fifo_size)
debug_mask = RTW89_DBG_TXRX;
else
debug_mask = RTW89_DBG_UNEXP;
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index c7e906123416..568488da3ff1 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -10,6 +10,7 @@
#include "ps.h"
#include "reg.h"
#include "sar.h"
+#include "txrx.h"
#include "util.h"
static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
@@ -1400,7 +1401,8 @@ static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x3);
rtw89_phy_write32_set(rtwdev, R_GNT_BT_WGT_EN, 0x1);
rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000);
- rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000);
+ if (chip->chip_id != RTL8851B)
+ rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000);
if (chip->chip_id == RTL8852B)
rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2);
@@ -1414,6 +1416,9 @@ static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
nctl_table = chip->nctl_table;
rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL);
+
+ if (chip->nctl_post_table)
+ rtw89_rfk_parser(rtwdev, chip->nctl_post_table);
}
static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
@@ -2338,27 +2343,29 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo)
{
+ const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
u32 reg_mask;
if (sc_xo)
- reg_mask = B_AX_XTAL_SC_XO_MASK;
+ reg_mask = xtal->sc_xo_mask;
else
- reg_mask = B_AX_XTAL_SC_XI_MASK;
+ reg_mask = xtal->sc_xi_mask;
- return (u8)rtw89_read32_mask(rtwdev, R_AX_XTAL_ON_CTRL0, reg_mask);
+ return (u8)rtw89_read32_mask(rtwdev, xtal->xcap_reg, reg_mask);
}
static void rtw89_phy_cfo_set_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo,
u8 val)
{
+ const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
u32 reg_mask;
if (sc_xo)
- reg_mask = B_AX_XTAL_SC_XO_MASK;
+ reg_mask = xtal->sc_xo_mask;
else
- reg_mask = B_AX_XTAL_SC_XI_MASK;
+ reg_mask = xtal->sc_xi_mask;
- rtw89_write32_mask(rtwdev, R_AX_XTAL_ON_CTRL0, reg_mask, val);
+ rtw89_write32_mask(rtwdev, xtal->xcap_reg, reg_mask, val);
}
static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
@@ -2371,7 +2378,7 @@ static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
if (!force && cfo->crystal_cap == crystal_cap)
return;
crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
- if (chip->chip_id == RTL8852A) {
+ if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) {
rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
@@ -2946,6 +2953,126 @@ static void rtw89_phy_ul_tb_info_init(struct rtw89_dev *rtwdev)
rtw89_phy_read32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN);
}
+static
+void rtw89_phy_antdiv_sts_instance_reset(struct rtw89_antdiv_stats *antdiv_sts)
+{
+ ewma_rssi_init(&antdiv_sts->cck_rssi_avg);
+ ewma_rssi_init(&antdiv_sts->ofdm_rssi_avg);
+ ewma_rssi_init(&antdiv_sts->non_legacy_rssi_avg);
+ antdiv_sts->pkt_cnt_cck = 0;
+ antdiv_sts->pkt_cnt_ofdm = 0;
+ antdiv_sts->pkt_cnt_non_legacy = 0;
+ antdiv_sts->evm = 0;
+}
+
+static void rtw89_phy_antdiv_sts_instance_add(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct rtw89_antdiv_stats *stats)
+{
+ if (GET_DATA_RATE_MODE(phy_ppdu->rate) == DATA_RATE_MODE_NON_HT) {
+ if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) {
+ ewma_rssi_add(&stats->cck_rssi_avg, phy_ppdu->rssi_avg);
+ stats->pkt_cnt_cck++;
+ } else {
+ ewma_rssi_add(&stats->ofdm_rssi_avg, phy_ppdu->rssi_avg);
+ stats->pkt_cnt_ofdm++;
+ stats->evm += phy_ppdu->ofdm.evm_min;
+ }
+ } else {
+ ewma_rssi_add(&stats->non_legacy_rssi_avg, phy_ppdu->rssi_avg);
+ stats->pkt_cnt_non_legacy++;
+ stats->evm += phy_ppdu->ofdm.evm_min;
+ }
+}
+
+static u8 rtw89_phy_antdiv_sts_instance_get_rssi(struct rtw89_antdiv_stats *stats)
+{
+ if (stats->pkt_cnt_non_legacy >= stats->pkt_cnt_cck &&
+ stats->pkt_cnt_non_legacy >= stats->pkt_cnt_ofdm)
+ return ewma_rssi_read(&stats->non_legacy_rssi_avg);
+ else if (stats->pkt_cnt_ofdm >= stats->pkt_cnt_cck &&
+ stats->pkt_cnt_ofdm >= stats->pkt_cnt_non_legacy)
+ return ewma_rssi_read(&stats->ofdm_rssi_avg);
+ else
+ return ewma_rssi_read(&stats->cck_rssi_avg);
+}
+
+static u8 rtw89_phy_antdiv_sts_instance_get_evm(struct rtw89_antdiv_stats *stats)
+{
+ return phy_div(stats->evm, stats->pkt_cnt_non_legacy + stats->pkt_cnt_ofdm);
+}
+
+void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ if (!hal->ant_diversity || hal->ant_diversity_fixed)
+ return;
+
+ rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->target_stats);
+
+ if (!antdiv->get_stats)
+ return;
+
+ if (hal->antenna_rx == RF_A)
+ rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->main_stats);
+ else if (hal->antenna_rx == RF_B)
+ rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->aux_stats);
+}
+
+static void rtw89_phy_antdiv_reg_init(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_ANT_TRAIN_EN,
+ 0x0, RTW89_PHY_0);
+ rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_TX_ANT_SEL,
+ 0x0, RTW89_PHY_0);
+
+ rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_TRSW_TX_EXTEND,
+ 0x0, RTW89_PHY_0);
+ rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_HW_ANTSW_DIS_BY_GNT_BT,
+ 0x0, RTW89_PHY_0);
+
+ rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_BT_FORCE_ANTIDX_EN,
+ 0x0, RTW89_PHY_0);
+
+ rtw89_phy_write32_idx(rtwdev, R_RFSW_CTRL_ANT0_BASE, B_RFSW_CTRL_ANT_MAPPING,
+ 0x0100, RTW89_PHY_0);
+
+ rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_BTG_TRX,
+ 0x1, RTW89_PHY_0);
+ rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_HW_CTRL,
+ 0x0, RTW89_PHY_0);
+ rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_2G,
+ 0x0, RTW89_PHY_0);
+ rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_5G,
+ 0x0, RTW89_PHY_0);
+}
+
+static void rtw89_phy_antdiv_sts_reset(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
+
+ rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);
+ rtw89_phy_antdiv_sts_instance_reset(&antdiv->main_stats);
+ rtw89_phy_antdiv_sts_instance_reset(&antdiv->aux_stats);
+}
+
+static void rtw89_phy_antdiv_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ if (!hal->ant_diversity)
+ return;
+
+ antdiv->get_stats = false;
+ antdiv->rssi_pre = 0;
+ rtw89_phy_antdiv_sts_reset(rtwdev);
+ rtw89_phy_antdiv_reg_init(rtwdev);
+}
+
static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev)
{
struct rtw89_phy_stat *phystat = &rtwdev->phystat;
@@ -4114,6 +4241,144 @@ void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev)
&done);
}
+#define ANTDIV_MAIN 0
+#define ANTDIV_AUX 1
+
+static void rtw89_phy_antdiv_set_ant(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 default_ant, optional_ant;
+
+ if (!hal->ant_diversity || hal->antenna_tx == 0)
+ return;
+
+ if (hal->antenna_tx == RF_B) {
+ default_ant = ANTDIV_AUX;
+ optional_ant = ANTDIV_MAIN;
+ } else {
+ default_ant = ANTDIV_MAIN;
+ optional_ant = ANTDIV_AUX;
+ }
+
+ rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_CGCS_CTRL,
+ default_ant, RTW89_PHY_0);
+ rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ORI,
+ default_ant, RTW89_PHY_0);
+ rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ALT,
+ optional_ant, RTW89_PHY_0);
+ rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_TX_ORI,
+ default_ant, RTW89_PHY_0);
+}
+
+static void rtw89_phy_swap_hal_antenna(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ hal->antenna_rx = hal->antenna_rx == RF_A ? RF_B : RF_A;
+ hal->antenna_tx = hal->antenna_rx;
+}
+
+static void rtw89_phy_antdiv_decision_state(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ bool no_change = false;
+ u8 main_rssi, aux_rssi;
+ u8 main_evm, aux_evm;
+ u32 candidate;
+
+ antdiv->get_stats = false;
+ antdiv->training_count = 0;
+
+ main_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->main_stats);
+ main_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->main_stats);
+ aux_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->aux_stats);
+ aux_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->aux_stats);
+
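+	/* Pick the antenna with clearly better EVM; fall back to RSSI when
+	 * EVM is inconclusive.
+	 */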
+ if (main_evm > aux_evm + ANTDIV_EVM_DIFF_TH)
+ candidate = RF_A;
+ else if (aux_evm > main_evm + ANTDIV_EVM_DIFF_TH)
+ candidate = RF_B;
+ else if (main_rssi > aux_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
+ candidate = RF_A;
+ else if (aux_rssi > main_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
+ candidate = RF_B;
+ else
+ no_change = true;
+
+ if (no_change) {
+ /* swap back from training antenna to original */
+ rtw89_phy_swap_hal_antenna(rtwdev);
+ return;
+ }
+
+ hal->antenna_tx = candidate;
+ hal->antenna_rx = candidate;
+}
+
+static void rtw89_phy_antdiv_training_state(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
+ u64 state_period;
+
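+	/* Even rounds open a measurement window; odd rounds swap to the
+	 * other antenna and pause ANTDIV_DELAY ms so the hardware settles.
+	 */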
+ if (antdiv->training_count % 2 == 0) {
+ if (antdiv->training_count == 0)
+ rtw89_phy_antdiv_sts_reset(rtwdev);
+
+ antdiv->get_stats = true;
+ state_period = msecs_to_jiffies(ANTDIV_TRAINNING_INTVL);
+ } else {
+ antdiv->get_stats = false;
+ state_period = msecs_to_jiffies(ANTDIV_DELAY);
+
+ rtw89_phy_swap_hal_antenna(rtwdev);
+ rtw89_phy_antdiv_set_ant(rtwdev);
+ }
+
+ antdiv->training_count++;
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work,
+ state_period);
+}
+
+void rtw89_phy_antdiv_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ antdiv_work.work);
+ struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
+
+ mutex_lock(&rtwdev->mutex);
+
+ if (antdiv->training_count <= ANTDIV_TRAINNING_CNT) {
+ rtw89_phy_antdiv_training_state(rtwdev);
+ } else {
+ rtw89_phy_antdiv_decision_state(rtwdev);
+ rtw89_phy_antdiv_set_ant(rtwdev);
+ }
+
+ mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 rssi, rssi_pre;
+
+ if (!hal->ant_diversity || hal->ant_diversity_fixed)
+ return;
+
+ rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->target_stats);
+ rssi_pre = antdiv->rssi_pre;
+ antdiv->rssi_pre = rssi;
+ rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);
+
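+	/* Only restart training when the averaged RSSI has moved by at least
+	 * ANTDIV_RSSI_DIFF_TH since the last check.
+	 */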
+ if (abs((int)rssi - (int)rssi_pre) < ANTDIV_RSSI_DIFF_TH)
+ return;
+
+ antdiv->training_count = 0;
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work, 0);
+}
+
static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
{
rtw89_phy_ccx_top_setting_init(rtwdev);
@@ -4133,6 +4398,8 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_phy_dig_init(rtwdev);
rtw89_phy_cfo_init(rtwdev);
rtw89_phy_ul_tb_info_init(rtwdev);
+ rtw89_phy_antdiv_init(rtwdev);
+ rtw89_phy_antdiv_set_ant(rtwdev);
rtw89_phy_init_rf_nctl(rtwdev);
rtw89_chip_rfk_init(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 7535867d0f48..ab174a0ba488 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -67,6 +67,14 @@
#define UL_TB_TF_CNT_L2H_TH 100
#define UL_TB_TF_CNT_H2L_TH 70
+#define ANTDIV_TRAINNING_CNT 2
+#define ANTDIV_TRAINNING_INTVL 30
+#define ANTDIV_DELAY 110
+#define ANTDIV_TP_DIFF_TH_HIGH 100
+#define ANTDIV_TP_DIFF_TH_LOW 5
+#define ANTDIV_EVM_DIFF_TH 8
+#define ANTDIV_RSSI_DIFF_TH 3
+
#define CCX_MAX_PERIOD 2097
#define CCX_MAX_PERIOD_UNIT 32
#define MS_TO_4US_RATIO 250
@@ -549,6 +557,10 @@ void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev);
void rtw89_phy_dig(struct rtw89_dev *rtwdev);
void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev);
+void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu);
+void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev);
+void rtw89_phy_antdiv_work(struct work_struct *work);
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
enum rtw89_mac_idx mac_idx,
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 266e4231b5f3..21f68787ff10 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -243,6 +243,12 @@
#define B_AX_XTAL_SC_XI_MASK GENMASK(16, 10)
#define B_AX_XTAL_SC_MASK GENMASK(6, 0)
+#define R_AX_XTAL_ON_CTRL3 0x028C
+#define B_AX_XTAL_SC_INIT_A_BLOCK_MASK GENMASK(30, 24)
+#define B_AX_XTAL_SC_LPS_A_BLOCK_MASK GENMASK(22, 16)
+#define B_AX_XTAL_SC_XO_A_BLOCK_MASK GENMASK(14, 8)
+#define B_AX_XTAL_SC_XI_A_BLOCK_MASK GENMASK(6, 0)
+
#define R_AX_GPIO0_7_FUNC_SEL 0x02D0
#define R_AX_EECS_EESK_FUNC_SEL 0x02D8
@@ -3574,6 +3580,7 @@
#define RR_MOD_MASK GENMASK(19, 16)
#define RR_MOD_DCK GENMASK(14, 10)
#define RR_MOD_RGM GENMASK(13, 4)
+#define RR_MOD_RXB GENMASK(9, 5)
#define RR_MOD_V_DOWN 0x0
#define RR_MOD_V_STANDBY 0x1
#define RR_TXAGC 0x10001
@@ -3713,6 +3720,7 @@
#define RR_RXBB 0x83
#define RR_RXBB_VOBUF GENMASK(15, 12)
#define RR_RXBB_C2G GENMASK(16, 10)
+#define RR_RXBB_C2 GENMASK(11, 8)
#define RR_RXBB_C1G GENMASK(9, 8)
#define RR_RXBB_FATT GENMASK(7, 0)
#define RR_RXBB_ATTR GENMASK(7, 4)
@@ -3776,15 +3784,21 @@
#define RR_LOGEN 0xa3
#define RR_LOGEN_RPT GENMASK(19, 16)
#define RR_SX 0xaf
+#define RR_IBD 0xc9
+#define RR_IBD_VAL GENMASK(4, 0)
#define RR_LDO 0xb1
#define RR_LDO_SEL GENMASK(8, 6)
#define RR_VCO 0xb2
+#define RR_VCO_SEL GENMASK(9, 8)
+#define RR_VCI 0xb3
+#define RR_VCI_ON BIT(7)
#define RR_LPF 0xb7
#define RR_LPF_BUSY BIT(8)
#define RR_XTALX2 0xb8
#define RR_MALSEL 0xbe
#define RR_SYNFB 0xc5
#define RR_SYNFB_LK BIT(15)
+#define RR_AACK 0xca
#define RR_LCKST 0xcf
#define RR_LCKST_BIN BIT(0)
#define RR_LCK_TRG 0xd3
@@ -3852,6 +3866,9 @@
#define B_ENABLE_CCK BIT(5)
#define R_RSTB_ASYNC 0x0704
#define B_RSTB_ASYNC_ALL BIT(1)
+#define R_P0_ANT_SW 0x0728
+#define B_P0_HW_ANTSW_DIS_BY_GNT_BT BIT(12)
+#define B_P0_TRSW_TX_EXTEND GENMASK(3, 0)
#define R_MAC_PIN_SEL 0x0734
#define B_CH_IDX_SEG0 GENMASK(23, 16)
#define R_PLCP_HISTOGRAM 0x0738
@@ -4455,10 +4472,24 @@
#define B_P0_RFCTM_VAL GENMASK(25, 20)
#define R_P0_RFCTM_RDY BIT(26)
#define R_P0_TRSW 0x5868
-#define B_P0_TRSW_B BIT(0)
-#define B_P0_TRSW_A BIT(1)
+#define B_P0_BT_FORCE_ANTIDX_EN BIT(12)
#define B_P0_TRSW_X BIT(2)
+#define B_P0_TRSW_A BIT(1)
+#define B_P0_TX_ANT_SEL BIT(1)
+#define B_P0_TRSW_B BIT(0)
+#define B_P0_ANT_TRAIN_EN BIT(0)
#define B_P0_TRSW_SO_A2 GENMASK(7, 5)
+#define R_P0_ANTSEL 0x586C
+#define B_P0_ANTSEL_SW_5G BIT(25)
+#define B_P0_ANTSEL_SW_2G BIT(23)
+#define B_P0_ANTSEL_BTG_TRX BIT(21)
+#define B_P0_ANTSEL_CGCS_CTRL BIT(17)
+#define B_P0_ANTSEL_HW_CTRL BIT(16)
+#define B_P0_ANTSEL_TX_ORI GENMASK(15, 12)
+#define B_P0_ANTSEL_RX_ALT GENMASK(11, 8)
+#define B_P0_ANTSEL_RX_ORI GENMASK(7, 4)
+#define R_RFSW_CTRL_ANT0_BASE 0x5870
+#define B_RFSW_CTRL_ANT_MAPPING GENMASK(15, 0)
#define R_P0_RFM 0x5894
#define B_P0_RFM_DIS_WL BIT(7)
#define B_P0_RFM_TX_OPT BIT(6)
@@ -4572,6 +4603,8 @@
#define IQK_DF4_TXT_8_25MHZ 0x021
#define R_IQK_CFG 0x8034
#define B_IQK_CFG_SET GENMASK(5, 4)
+#define R_IQK_RXA 0x8044
+#define B_IQK_RXAGC GENMASK(15, 13)
#define R_TPG_SEL 0x8068
#define R_TPG_MOD 0x806C
#define B_TPG_MOD_F GENMASK(2, 1)
@@ -4619,6 +4652,7 @@
#define B_PRT_COM_SYNERR BIT(30)
#define B_PRT_COM_DCI GENMASK(27, 16)
#define B_PRT_COM_CORV GENMASK(15, 8)
+#define B_RPT_COM_RDY GENMASK(15, 0)
#define B_PRT_COM_DCQ GENMASK(11, 0)
#define B_PRT_COM_RXOV BIT(8)
#define B_PRT_COM_GL GENMASK(7, 4)
@@ -4730,11 +4764,15 @@
#define B_IQKINF2_KCNT GENMASK(15, 8)
#define B_IQKINF2_NCTLV GENMASK(7, 0)
#define R_DCOF0 0xC000
+#define B_DCOF0_RST BIT(17)
#define B_DCOF0_V GENMASK(4, 1)
#define R_DCOF1 0xC004
+#define B_DCOF1_RST BIT(17)
#define B_DCOF1_S BIT(0)
#define R_DCOF8 0xC020
#define B_DCOF8_V GENMASK(4, 1)
+#define R_DCOF9 0xC024
+#define B_DCOF9_RST BIT(17)
#define R_DACK_S0P0 0xC040
#define B_DACK_S0P0_OK BIT(31)
#define R_DACK_BIAS00 0xc048
@@ -4786,6 +4824,7 @@
#define B_ADDCK0D_VAL GENMASK(25, 16)
#define R_ADDCK0 0xC0F4
#define B_ADDCK0_TRG BIT(11)
+#define B_ADDCK0_IQ BIT(10)
#define B_ADDCK0 GENMASK(9, 8)
#define B_ADDCK0_MAN GENMASK(5, 4)
#define B_ADDCK0_EN BIT(4)
@@ -4797,6 +4836,7 @@
#define B_ADDCK0_RL0 GENMASK(17, 8)
#define R_ADDCKR0 0xC0FC
#define B_ADDCKR0_A0 GENMASK(19, 10)
+#define B_ADDCKR0_DC GENMASK(15, 4)
#define B_ADDCKR0_A1 GENMASK(9, 0)
#define R_DACK10 0xC100
#define B_DACK10 GENMASK(4, 1)
@@ -4847,6 +4887,11 @@
#define R_ADDCKR1 0xC1fC
#define B_ADDCKR1_A0 GENMASK(19, 10)
#define B_ADDCKR1_A1 GENMASK(9, 0)
+#define R_DACKN0_CTL 0xC210
+#define B_DACKN0_EN BIT(0)
+#define B_DACKN0_V GENMASK(21, 14)
+#define R_DACKN1_CTL 0xC224
+#define B_DACKN1_V GENMASK(21, 14)
/* WiFi CPU local domain */
#define R_AX_WDT_CTRL 0x0040
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
new file mode 100644
index 000000000000..b68ebe950c4e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2022-2023 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "fw.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8851b.h"
+#include "rtw8851b_rfk_table.h"
+#include "rtw8851b_table.h"
+#include "txrx.h"
+#include "util.h"
+
+#define RTW8851B_FW_FORMAT_MAX 0
+#define RTW8851B_FW_BASENAME "rtw89/rtw8851b_fw"
+#define RTW8851B_MODULE_FIRMWARE \
+ RTW8851B_FW_BASENAME ".bin"
+
+static const struct rtw89_hfc_ch_cfg rtw8851b_hfc_chcfg_pcie[] = {
+ {5, 343, grp_0}, /* ACH 0 */
+ {5, 343, grp_0}, /* ACH 1 */
+ {5, 343, grp_0}, /* ACH 2 */
+ {5, 343, grp_0}, /* ACH 3 */
+ {0, 0, grp_0}, /* ACH 4 */
+ {0, 0, grp_0}, /* ACH 5 */
+ {0, 0, grp_0}, /* ACH 6 */
+ {0, 0, grp_0}, /* ACH 7 */
+ {4, 344, grp_0}, /* B0MGQ */
+ {4, 344, grp_0}, /* B0HIQ */
+ {0, 0, grp_0}, /* B1MGQ */
+ {0, 0, grp_0}, /* B1HIQ */
+ {40, 0, 0} /* FWCMDQ */
+};
+
+static const struct rtw89_hfc_pub_cfg rtw8851b_hfc_pubcfg_pcie = {
+ 448, /* Group 0 */
+ 0, /* Group 1 */
+ 448, /* Public Max */
+ 0 /* WP threshold */
+};
+
+static const struct rtw89_hfc_param_ini rtw8851b_hfc_param_ini_pcie[] = {
+ [RTW89_QTA_SCC] = {rtw8851b_hfc_chcfg_pcie, &rtw8851b_hfc_pubcfg_pcie,
+ &rtw89_mac_size.hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_mac_size.hfc_preccfg_pcie,
+ RTW89_HCIFC_POH},
+ [RTW89_QTA_INVALID] = {NULL},
+};
+
+static const struct rtw89_dle_mem rtw8851b_dle_mem_pcie[] = {
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size6,
+ &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6,
+ &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18,
+ &rtw89_mac_size.ple_qt58},
+ [RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size6,
+ &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6,
+ &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18,
+ &rtw89_mac_size.ple_qt_51b_wow},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size9,
+ &rtw89_mac_size.ple_size8, &rtw89_mac_size.wde_qt4,
+ &rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt13,
+ &rtw89_mac_size.ple_qt13},
+ [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
+ NULL},
+};
+
+static const struct rtw89_xtal_info rtw8851b_xtal_info = {
+ .xcap_reg = R_AX_XTAL_ON_CTRL3,
+ .sc_xo_mask = B_AX_XTAL_SC_XO_A_BLOCK_MASK,
+ .sc_xi_mask = B_AX_XTAL_SC_XI_A_BLOCK_MASK,
+};
+
+static const struct rtw89_chip_ops rtw8851b_chip_ops = {
+ .fem_setup = NULL,
+ .fill_txdesc = rtw89_core_fill_txdesc,
+ .fill_txdesc_fwcmd = rtw89_core_fill_txdesc,
+ .h2c_dctl_sec_cam = NULL,
+};
+
+#ifdef CONFIG_PM
+static const struct wiphy_wowlan_support rtw_wowlan_stub_8851b = {
+ .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+ .n_patterns = RTW89_MAX_PATTERN_NUM,
+ .pattern_max_len = RTW89_MAX_PATTERN_SIZE,
+ .pattern_min_len = 1,
+};
+#endif
+
+const struct rtw89_chip_info rtw8851b_chip_info = {
+ .chip_id = RTL8851B,
+ .ops = &rtw8851b_chip_ops,
+ .fw_basename = RTW8851B_FW_BASENAME,
+ .fw_format_max = RTW8851B_FW_FORMAT_MAX,
+ .try_ce_fw = true,
+ .fifo_size = 196608,
+ .small_fifo_size = true,
+ .dle_scc_rsvd_size = 98304,
+ .max_amsdu_limit = 3500,
+ .dis_2g_40m_ul_ofdma = true,
+ .rsvd_ple_ofst = 0x2f800,
+ .hfc_param_ini = rtw8851b_hfc_param_ini_pcie,
+ .dle_mem = rtw8851b_dle_mem_pcie,
+ .wde_qempty_acq_num = 4,
+ .wde_qempty_mgq_sel = 4,
+ .rf_base_addr = {0xe000},
+ .pwr_on_seq = NULL,
+ .pwr_off_seq = NULL,
+ .bb_table = &rtw89_8851b_phy_bb_table,
+ .bb_gain_table = &rtw89_8851b_phy_bb_gain_table,
+ .rf_table = {&rtw89_8851b_phy_radioa_table,},
+ .nctl_table = &rtw89_8851b_phy_nctl_table,
+ .nctl_post_table = &rtw8851b_nctl_post_defs_tbl,
+ .byr_table = &rtw89_8851b_byr_table,
+ .dflt_parms = &rtw89_8851b_dflt_parms,
+ .rfe_parms_conf = rtw89_8851b_rfe_parms_conf,
+ .txpwr_factor_rf = 2,
+ .txpwr_factor_mac = 1,
+ .dig_table = NULL,
+ .tssi_dbw_table = NULL,
+ .support_chanctx_num = 0,
+ .support_bands = BIT(NL80211_BAND_2GHZ) |
+ BIT(NL80211_BAND_5GHZ),
+ .support_bw160 = false,
+ .support_ul_tb_ctrl = true,
+ .hw_sec_hdr = false,
+ .rf_path_num = 1,
+ .tx_nss = 1,
+ .rx_nss = 1,
+ .acam_num = 32,
+ .bcam_num = 20,
+ .scam_num = 128,
+ .bacam_num = 2,
+ .bacam_dynamic_num = 4,
+ .bacam_ver = RTW89_BACAM_V0,
+ .sec_ctrl_efuse_size = 4,
+ .physical_efuse_size = 1216,
+ .logical_efuse_size = 2048,
+ .limit_efuse_size = 1280,
+ .dav_phy_efuse_size = 0,
+ .dav_log_efuse_size = 0,
+ .phycap_addr = 0x580,
+ .phycap_size = 128,
+ .para_ver = 0,
+ .wlcx_desired = 0x06000000,
+ .btcx_desired = 0x7,
+ .scbd = 0x1,
+ .mailbox = 0x1,
+
+ .ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) |
+ BIT(RTW89_PS_MODE_CLK_GATED),
+ .low_power_hci_modes = 0,
+ .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD,
+ .hci_func_en_addr = R_AX_HCI_FUNC_EN,
+ .h2c_desc_size = sizeof(struct rtw89_txwd_body),
+ .txwd_body_size = sizeof(struct rtw89_txwd_body),
+ .bss_clr_map_reg = R_BSS_CLR_MAP_V1,
+ .dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
+ BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
+ BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
+ .edcca_lvl_reg = R_SEG0R_EDCCA_LVL_V1,
+#ifdef CONFIG_PM
+ .wowlan_stub = &rtw_wowlan_stub_8851b,
+#endif
+ .xtal_info = &rtw8851b_xtal_info,
+};
+EXPORT_SYMBOL(rtw8851b_chip_info);
+
+MODULE_FIRMWARE(RTW8851B_MODULE_FIRMWARE);
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8851B driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.h b/drivers/net/wireless/realtek/rtw89/rtw8851b.h
new file mode 100644
index 000000000000..e34b7d09ad6d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2022-2023 Realtek Corporation
+ */
+
+#ifndef __RTW89_8851B_H__
+#define __RTW89_8851B_H__
+
+#include "core.h"
+
+#define RF_PATH_NUM_8851B 1
+#define BB_PATH_NUM_8851B 1
+
+extern const struct rtw89_chip_info rtw8851b_chip_info;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
new file mode 100644
index 000000000000..6eb47ed82010
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c
@@ -0,0 +1,1775 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2022-2023 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "debug.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8851b.h"
+#include "rtw8851b_rfk.h"
+#include "rtw8851b_rfk_table.h"
+#include "rtw8851b_table.h"
+
+#define RTW8851B_RXK_GROUP_NR 4
+#define RTW8851B_TXK_GROUP_NR 1
+#define RTW8851B_IQK_VER 0x2a
+#define RTW8851B_IQK_SS 1
+#define RTW8851B_LOK_GRAM 10
+
+enum rtw8851b_iqk_type {
+ ID_TXAGC = 0x0,
+ ID_FLOK_COARSE = 0x1,
+ ID_FLOK_FINE = 0x2,
+ ID_TXK = 0x3,
+ ID_RXAGC = 0x4,
+ ID_RXK = 0x5,
+ ID_NBTXK = 0x6,
+ ID_NBRXK = 0x7,
+ ID_FLOK_VBUFFER = 0x8,
+ ID_A_FLOK_COARSE = 0x9,
+ ID_G_FLOK_COARSE = 0xa,
+ ID_A_FLOK_FINE = 0xb,
+ ID_G_FLOK_FINE = 0xc,
+ ID_IQK_RESTORE = 0x10,
+};
+
+static const u32 g_idxrxgain[RTW8851B_RXK_GROUP_NR] = {0x10e, 0x116, 0x28e, 0x296};
+static const u32 g_idxattc2[RTW8851B_RXK_GROUP_NR] = {0x0, 0xf, 0x0, 0xf};
+static const u32 g_idxrxagc[RTW8851B_RXK_GROUP_NR] = {0x0, 0x1, 0x2, 0x3};
+static const u32 a_idxrxgain[RTW8851B_RXK_GROUP_NR] = {0x10C, 0x112, 0x28c, 0x292};
+static const u32 a_idxattc2[RTW8851B_RXK_GROUP_NR] = {0xf, 0xf, 0xf, 0xf};
+static const u32 a_idxrxagc[RTW8851B_RXK_GROUP_NR] = {0x4, 0x5, 0x6, 0x7};
+static const u32 a_power_range[RTW8851B_TXK_GROUP_NR] = {0x0};
+static const u32 a_track_range[RTW8851B_TXK_GROUP_NR] = {0x6};
+static const u32 a_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x0a};
+static const u32 a_itqt[RTW8851B_TXK_GROUP_NR] = {0x12};
+static const u32 g_power_range[RTW8851B_TXK_GROUP_NR] = {0x0};
+static const u32 g_track_range[RTW8851B_TXK_GROUP_NR] = {0x6};
+static const u32 g_gain_bb[RTW8851B_TXK_GROUP_NR] = {0x10};
+static const u32 g_itqt[RTW8851B_TXK_GROUP_NR] = {0x12};
+
+static const u32 rtw8851b_backup_bb_regs[] = {0xc0ec, 0xc0e8};
+static const u32 rtw8851b_backup_rf_regs[] = {
+ 0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5};
+
+#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8851b_backup_bb_regs)
+#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8851b_backup_rf_regs)
+
+static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ return RF_A;
+}
+
+static void _adc_fifo_rst(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0101);
+ fsleep(10);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x1111);
+}
+
+static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
+{
+ u32 rf_mode;
+ u8 path;
+ int ret;
+
+ for (path = 0; path < RF_PATH_MAX; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode,
+ rf_mode != 2, 2, 5000, false,
+ rtwdev, path, 0x00, RR_MOD_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
+ path, ret);
+ }
+}
+
+static void _dack_reset(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x1);
+}
+
+static void _drck(struct rtw89_dev *rtwdev)
+{
+ u32 rck_d;
+ u32 val;
+ int ret;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n");
+
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+ 1, 10000, false,
+ rtwdev, R_DRCK_RES, B_DRCK_POL);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");
+
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0);
+
+ rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, 0x7c00);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, rck_d);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n",
+ rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
+}
+
+static void _addck_backup(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
+
+ dack->addck_d[0][0] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0);
+ dack->addck_d[0][1] = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1);
+}
+
+static void _addck_reload(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, dack->addck_d[0][0]);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, dack->addck_d[0][1]);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3);
+}
+
+static void _dack_backup_s0(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
+ dack->msbk_d[0][0][i] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0);
+
+ rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
+ dack->msbk_d[0][1][i] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1);
+ }
+
+ dack->biask_d[0][0] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00);
+ dack->biask_d[0][1] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01);
+ dack->dadck_d[0][0] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00) + 24;
+ dack->dadck_d[0][1] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01) + 24;
+}
+
+static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, u8 index)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 idx_offset, path_offset;
+ u32 offset, reg;
+ u32 tmp;
+ u8 i;
+
+ if (index == 0)
+ idx_offset = 0;
+ else
+ idx_offset = 0x14;
+
+ if (path == RF_PATH_A)
+ path_offset = 0;
+ else
+ path_offset = 0x28;
+
+ offset = idx_offset + path_offset;
+
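+	/* The reload registers appear to be laid out as two 0x14-byte banks
+	 * per index, with the path B banks a further 0x28 above path A.
+	 */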
+ rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_RST, 0x1);
+
+ /* msbk_d: 15/14/13/12 */
+ tmp = 0x0;
+ for (i = 0; i < 4; i++)
+ tmp |= dack->msbk_d[path][index][i + 12] << (i * 8);
+ reg = 0xc200 + offset;
+ rtw89_phy_write32(rtwdev, reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg,
+ rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD));
+
+ /* msbk_d: 11/10/9/8 */
+ tmp = 0x0;
+ for (i = 0; i < 4; i++)
+ tmp |= dack->msbk_d[path][index][i + 8] << (i * 8);
+ reg = 0xc204 + offset;
+ rtw89_phy_write32(rtwdev, reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg,
+ rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD));
+
+ /* msbk_d: 7/6/5/4 */
+ tmp = 0x0;
+ for (i = 0; i < 4; i++)
+ tmp |= dack->msbk_d[path][index][i + 4] << (i * 8);
+ reg = 0xc208 + offset;
+ rtw89_phy_write32(rtwdev, reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg,
+ rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD));
+
+ /* msbk_d: 3/2/1/0 */
+ tmp = 0x0;
+ for (i = 0; i < 4; i++)
+ tmp |= dack->msbk_d[path][index][i] << (i * 8);
+ reg = 0xc20c + offset;
+ rtw89_phy_write32(rtwdev, reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg,
+ rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD));
+
+	/* dadck_d/biask_d */
+	tmp = (dack->biask_d[path][index] << 22) |
+	      (dack->dadck_d[path][index] << 14);
+ reg = 0xc210 + offset;
+ rtw89_phy_write32(rtwdev, reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", reg,
+ rtw89_phy_read32_mask(rtwdev, reg, MASKDWORD));
+
+ rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL + offset, B_DACKN0_EN, 0x1);
+}
+
+static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ u8 index;
+
+ for (index = 0; index < 2; index++)
+ _dack_reload_by_path(rtwdev, path, index);
+}
+
+static void _addck(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 val;
+ int ret;
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_EN, 0x0);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+ 1, 10000, false,
+ rtwdev, R_ADDCKR0, BIT(0));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
+ dack->addck_timeout[0] = true;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_RST, 0x0);
+}
+
+static void _new_dadck(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 i_dc, q_dc, ic, qc;
+ u32 val;
+ int ret;
+
+ rtw89_rfk_parser(rtwdev, &rtw8851b_dadck_setup_defs_tbl);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+ 1, 10000, false,
+ rtwdev, R_ADDCKR0, BIT(0));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADCK timeout\n");
+ dack->addck_timeout[0] = true;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DADCK ret = %d\n", ret);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_IQ, 0x0);
+ i_dc = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_DC);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_IQ, 0x1);
+ q_dc = rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_DC);
+
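+	/* Convert the sign-extended 12-bit DC readings into DAC codes centred
+	 * on 0x80; the scale factor of 6 is the gain assumed by this
+	 * calibration sequence.
+	 */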
+ ic = 0x80 - sign_extend32(i_dc, 11) * 6;
+ qc = 0x80 - sign_extend32(q_dc, 11) * 6;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]before DADCK, i_dc=0x%x, q_dc=0x%x\n", i_dc, q_dc);
+
+ dack->dadck_d[0][0] = ic;
+ dack->dadck_d[0][1] = qc;
+
+ rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL, B_DACKN0_V, dack->dadck_d[0][0]);
+ rtw89_phy_write32_mask(rtwdev, R_DACKN1_CTL, B_DACKN1_V, dack->dadck_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]after DADCK, 0xc210=0x%x, 0xc224=0x%x\n",
+ rtw89_phy_read32_mask(rtwdev, R_DACKN0_CTL, MASKDWORD),
+ rtw89_phy_read32_mask(rtwdev, R_DACKN1_CTL, MASKDWORD));
+
+ rtw89_rfk_parser(rtwdev, &rtw8851b_dadck_post_defs_tbl);
+}
+
+static bool _dack_s0_poll(struct rtw89_dev *rtwdev)
+{
+ if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
+ return false;
+
+ return true;
+}
+
+static void _dack_s0(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ bool done;
+ int ret;
+
+ rtw89_rfk_parser(rtwdev, &rtw8851b_dack_s0_1_defs_tbl);
+ _dack_reset(rtwdev, RF_PATH_A);
+ rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1);
+
+ ret = read_poll_timeout_atomic(_dack_s0_poll, done, done,
+ 1, 10000, false, rtwdev);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n");
+ dack->msbk_timeout[0] = true;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);
+
+ rtw89_rfk_parser(rtwdev, &rtw8851b_dack_s0_2_defs_tbl);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");
+
+ _dack_backup_s0(rtwdev);
+ _dack_reload(rtwdev, RF_PATH_A);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
+}
+
+static void _dack(struct rtw89_dev *rtwdev)
+{
+ _dack_s0(rtwdev);
+}
+
+static void _dack_dump(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+ u8 t;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->addck_d[0][0], dack->addck_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->dadck_d[0][0], dack->dadck_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
+ dack->biask_d[0][0], dack->biask_d[0][1]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[0][0][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[0][1][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+}
+
+static void _dack_manual_off(struct rtw89_dev *rtwdev)
+{
+ rtw89_rfk_parser(rtwdev, &rtw8851b_dack_manual_off_defs_tbl);
+}
+
+static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 rf0_0;
+
+ dack->dack_done = false;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK 0x2\n");
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
+ rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]RF0=0x%x\n", rf0_0);
+
+ _drck(rtwdev);
+ _dack_manual_off(rtwdev);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
+
+ _addck(rtwdev);
+ _addck_backup(rtwdev);
+ _addck_reload(rtwdev);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x40001);
+
+ _dack(rtwdev);
+ _new_dadck(rtwdev);
+ _dack_dump(rtwdev);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
+
+ dack->dack_done = true;
+ dack->dack_cnt++;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
+}
+
+static void _iqk_sram(struct rtw89_dev *rtwdev, u8 path)
+{
+ u32 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00020000);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, MASKDWORD, 0x80000000);
+ rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000080);
+ rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);
+
+ for (i = 0; i <= 0x9f; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD,
+ 0x00010000 + i);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n",
+ rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI));
+ }
+
+ for (i = 0; i <= 0x9f; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD,
+ 0x00010000 + i);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n",
+ rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ));
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000000);
+ rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00000000);
+}
+
+static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1);
+}
+
+static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path)
+{
+ bool fail1 = false, fail2 = false;
+ u32 val;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
+ 10, 8200, false,
+ rtwdev, 0xbff8, MASKBYTE0);
+ if (ret) {
+ fail1 = true;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]NCTL1 IQK timeout!!!\n");
+ }
+
+ fsleep(10);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
+ 10, 200, false,
+ rtwdev, R_RPT_COM, B_RPT_COM_RDY);
+ if (ret) {
+ fail2 = true;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]NCTL2 IQK timeout!!!\n");
+ }
+
+ fsleep(10);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, ret = %d, notready = %x fail=%d,%d\n",
+ path, ret, fail1 || fail2, fail1, fail2);
+
+ return fail1 || fail2;
+}
+
+static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path, u8 ktype)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool notready;
+ u32 iqk_cmd;
+
+ switch (ktype) {
+ case ID_A_FLOK_COARSE:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]============ S%d ID_A_FLOK_COARSE ============\n", path);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
+ iqk_cmd = 0x108 | (1 << (4 + path));
+ break;
+ case ID_G_FLOK_COARSE:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]============ S%d ID_G_FLOK_COARSE ============\n", path);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
+ iqk_cmd = 0x108 | (1 << (4 + path));
+ break;
+ case ID_A_FLOK_FINE:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]============ S%d ID_A_FLOK_FINE ============\n", path);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
+ iqk_cmd = 0x308 | (1 << (4 + path));
+ break;
+ case ID_G_FLOK_FINE:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]============ S%d ID_G_FLOK_FINE ============\n", path);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
+ iqk_cmd = 0x308 | (1 << (4 + path));
+ break;
+ case ID_TXK:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]============ S%d ID_TXK ============\n", path);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0);
+ iqk_cmd = 0x008 | (1 << (path + 4)) |
+ (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
+ break;
+ case ID_RXAGC:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]============ S%d ID_RXAGC ============\n", path);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
+ iqk_cmd = 0x708 | (1 << (4 + path)) | (path << 1);
+ break;
+ case ID_RXK:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]============ S%d ID_RXK ============\n", path);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
+ iqk_cmd = 0x008 | (1 << (path + 4)) |
+ (((0xc + iqk_info->iqk_bw[path]) & 0xf) << 8);
+ break;
+ case ID_NBTXK:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]============ S%d ID_NBTXK ============\n", path);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
+ 0x00b);
+ iqk_cmd = 0x408 | (1 << (4 + path));
+ break;
+ case ID_NBRXK:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]============ S%d ID_NBRXK ============\n", path);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT,
+ 0x011);
+ iqk_cmd = 0x608 | (1 << (4 + path));
+ break;
+ default:
+ return false;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
+ notready = _iqk_check_cal(rtwdev, path);
+ if (iqk_info->iqk_sram_en &&
+ (ktype == ID_NBRXK || ktype == ID_RXK))
+ _iqk_sram(rtwdev, path);
+
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, ktype= %x, id = %x, notready = %x\n",
+ path, ktype, iqk_cmd + 1, notready);
+
+ return notready;
+}
+
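+/*
+ * Wideband 2 GHz RX IQK: sweep every RX gain group, running RXAGC and
+ * then the wideband RXK per group, and capture a narrowband CFIR from
+ * the last group (gp == 0x3) as the fallback used when the wideband
+ * result fails.
+ */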
+static bool _rxk_2g_group_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool kfail = false;
+ bool notready;
+ u32 rf_0;
+ u8 gp;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ for (gp = 0; gp < RTW8851B_RXK_GROUP_NR; gp++) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2, g_idxattc2[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp);
+
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
+ fsleep(10);
+ rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
+
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", path,
+ rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD),
+ rtw89_read_rf(rtwdev, path, RR_MOD, 0x003e0));
+
+ if (gp == 0x3) {
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
+ iqk_info->nb_rxcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path,
+ rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));
+ }
+
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, WBRXK 0x8008 = 0x%x\n", path,
+ rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));
+ }
+
+ if (!notready)
+ kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+
+ if (kfail)
+ _iqk_sram(rtwdev, path);
+
+ if (kfail) {
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD, iqk_info->nb_rxcfir[path] | 0x2);
+ iqk_info->is_wb_txiqk[path] = false;
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD, 0x40000000);
+ iqk_info->is_wb_txiqk[path] = true;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail,
+ 1 << path, iqk_info->nb_rxcfir[path]);
+ return kfail;
+}
+
+static bool _rxk_5g_group_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool kfail = false;
+ bool notready;
+ u32 rf_0;
+ u8 gp;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ for (gp = 0; gp < RTW8851B_RXK_GROUP_NR; gp++) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, 0x03ff0, a_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[gp]);
+
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp);
+
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
+ fsleep(100);
+ rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", path,
+ rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD),
+ rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_RXB));
+
+ if (gp == 0x3) {
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
+ iqk_info->nb_rxcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path,
+ rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));
+ }
+
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, WBRXK 0x8008 = 0x%x\n", path,
+ rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));
+ }
+
+ if (!notready)
+ kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+
+ if (kfail)
+ _iqk_sram(rtwdev, path);
+
+ if (kfail) {
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
+ iqk_info->nb_rxcfir[path] | 0x2);
+ iqk_info->is_wb_txiqk[path] = false;
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
+ 0x40000000);
+ iqk_info->is_wb_txiqk[path] = true;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail,
+ 1 << path, iqk_info->nb_rxcfir[path]);
+ return kfail;
+}
+
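+/*
+ * Narrowband 5 GHz RX IQK: same setup as the group sweep but run only
+ * for the last gain group. Only the narrowband coefficient is
+ * produced, so is_wb_rxiqk stays false on both outcomes.
+ */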
+static bool _iqk_5g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool kfail = false;
+ bool notready;
+ u8 gp = 0x3;
+ u32 rf_0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RXA2, RR_RXA2_ATT, a_idxattc2[gp]);
+
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp);
+
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
+ fsleep(100);
+ rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n", path,
+ rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD),
+ rtw89_read_rf(rtwdev, path, RR_MOD, 0x003e0));
+
+ if (gp == 0x3) {
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
+ iqk_info->nb_rxcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path,
+ rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, WBRXK 0x8008 = 0x%x\n",
+ path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));
+
+ if (!notready)
+ kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+
+ if (kfail) {
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD, 0x40000002);
+ iqk_info->is_wb_rxiqk[path] = false;
+ } else {
+ iqk_info->is_wb_rxiqk[path] = false;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail,
+ 1 << path, iqk_info->nb_rxcfir[path]);
+
+ return kfail;
+}
+
+static bool _iqk_2g_nbrxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool kfail = false;
+ bool notready;
+ u8 gp = 0x3;
+ u32 rf_0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, gp = %x\n", path, gp);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2, g_idxattc2[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP_V1, gp);
+
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
+ fsleep(10);
+ rf_0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, rf_0);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, RXAGC 0x8008 = 0x%x, rxbb = %x\n",
+ path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD),
+ rtw89_read_rf(rtwdev, path, RR_MOD, 0x003e0));
+
+ if (gp == 0x3) {
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
+ iqk_info->nb_rxcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD) | 0x2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, NBRXK 0x8008 = 0x%x\n", path,
+ rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, WBRXK 0x8008 = 0x%x\n",
+ path, rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD));
+
+ if (!notready)
+ kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+
+ if (kfail) {
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD, 0x40000002);
+ iqk_info->is_wb_rxiqk[path] = false;
+ } else {
+ iqk_info->is_wb_rxiqk[path] = false;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, kfail = 0x%x, 0x8%x3c = 0x%x\n", path, kfail,
+ 1 << path, iqk_info->nb_rxcfir[path]);
+ return kfail;
+}
+
+static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
+
+ if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80)
+ rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_rxclk_80_defs_tbl);
+ else
+ rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_rxclk_others_defs_tbl);
+}
+
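+/*
+ * Wideband 5 GHz TX IQK: per TX gain group, program the power/track
+ * ranges and BB gain, run NBTXK to capture the narrowband CFIR
+ * fallback, then the full TXK; on failure the narrowband coefficient
+ * is written back.
+ */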
+static bool _txk_5g_group_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool kfail = false;
+ bool notready;
+ u8 gp;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);
+
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]);
+
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+ iqk_info->nb_txcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ MASKDWORD, a_itqt[gp]);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
+ }
+
+ if (!notready)
+ kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+
+ if (kfail) {
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
+ MASKDWORD, iqk_info->nb_txcfir[path] | 0x2);
+ iqk_info->is_wb_txiqk[path] = false;
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
+ MASKDWORD, 0x40000000);
+ iqk_info->is_wb_txiqk[path] = true;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail,
+ 1 << path, iqk_info->nb_txcfir[path]);
+ return kfail;
+}
+
+static bool _txk_2g_group_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool kfail = false;
+ bool notready;
+ u8 gp;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]);
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, g_itqt[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+ iqk_info->nb_txcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ MASKDWORD, g_itqt[gp]);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
+ }
+
+ if (!notready)
+ kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+
+ if (kfail) {
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
+ MASKDWORD, iqk_info->nb_txcfir[path] | 0x2);
+ iqk_info->is_wb_txiqk[path] = false;
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
+ MASKDWORD, 0x40000000);
+ iqk_info->is_wb_txiqk[path] = true;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail,
+ 1 << path, iqk_info->nb_txcfir[path]);
+ return kfail;
+}
+
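+/*
+ * Narrowband 5 GHz TX IQK: run only the NBTXK step per gain group and
+ * keep the narrowband CFIR. The kfail paths update is_wb_rxiqk here,
+ * which mirrors the vendor flow as-is.
+ */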
+static bool _iqk_5g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool kfail = false;
+ bool notready;
+ u8 gp;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);
+
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, a_itqt[gp]);
+
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+ iqk_info->nb_txcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_TXIQC, MASKDWORD) | 0x2;
+ }
+
+ if (!notready)
+ kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+
+ if (kfail) {
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
+ MASKDWORD, 0x40000002);
+ iqk_info->is_wb_rxiqk[path] = false;
+ } else {
+ iqk_info->is_wb_rxiqk[path] = false;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail,
+ 1 << path, iqk_info->nb_txcfir[path]);
+ return kfail;
+}
+
+static bool _iqk_2g_nbtxk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool kfail = false;
+ bool notready;
+ u8 gp;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ for (gp = 0x0; gp < RTW8851B_TXK_GROUP_NR; gp++) {
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, g_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, g_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, g_gain_bb[gp]);
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, MASKDWORD, g_itqt[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_GP, gp);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+ iqk_info->nb_txcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8),
+ MASKDWORD) | 0x2;
+ }
+
+ if (!notready)
+ kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+
+ if (kfail) {
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
+ MASKDWORD, 0x40000002);
+ iqk_info->is_wb_rxiqk[path] = false;
+ } else {
+ iqk_info->is_wb_rxiqk[path] = false;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, kfail = 0x%x, 0x8%x38 = 0x%x\n", path, kfail,
+ 1 << path, iqk_info->nb_txcfir[path]);
+ return kfail;
+}
+
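+/*
+ * 2 GHz LO leakage calibration: step through RTW8851B_LOK_GRAM TXBB
+ * gain points and run a coarse (0x109) and a fine (0x309) FLOK
+ * one-shot at each, then dump the resulting DTXLOK codes.
+ */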
+static bool _iqk_2g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ static const u32 g_txbb[RTW8851B_LOK_GRAM] = {
+ 0x02, 0x06, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17};
+ static const u32 g_itqt[RTW8851B_LOK_GRAM] = {
+ 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x12, 0x12, 0x12, 0x1b};
+ static const u32 g_wa[RTW8851B_LOK_GRAM] = {
+ 0x00, 0x04, 0x08, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17};
+ bool fail = false;
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR0, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR1, 0x6);
+
+ for (i = 0; i < RTW8851B_LOK_GRAM; i++) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_TG, g_txbb[i]);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RR_LUTWA_M1, g_wa[i]);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, g_itqt[i]);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
+ 0x00000109 | (1 << (4 + path)));
+ fail |= _iqk_check_cal(rtwdev, path);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, g_itqt[i]);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
+ 0x00000309 | (1 << (4 + path)));
+ fail |= _iqk_check_cal(rtwdev, path);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S0, i = %x, 0x8[19:15] = 0x%x,0x8[09:05] = 0x%x\n", i,
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0xf8000),
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0x003e0));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S0, i = %x, 0x9[19:16] = 0x%x,0x9[09:06] = 0x%x\n", i,
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0xf0000),
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0x003c0));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S0, i = %x, 0x58 = %x\n", i,
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_TXMO, RFREG_MASK));
+ }
+
+ return fail;
+}
+
+static bool _iqk_5g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ static const u32 a_txbb[RTW8851B_LOK_GRAM] = {
+ 0x02, 0x06, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x17};
+ static const u32 a_itqt[RTW8851B_LOK_GRAM] = {
+ 0x09, 0x09, 0x09, 0x12, 0x12, 0x12, 0x1b, 0x1b, 0x1b, 0x1b};
+ static const u32 a_wa[RTW8851B_LOK_GRAM] = {
+ 0x80, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x97};
+ bool fail = false;
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR0, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_GR1, 0x7);
+
+ for (i = 0; i < RTW8851B_LOK_GRAM; i++) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_TXIG, RR_TXIG_TG, a_txbb[i]);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RR_LUTWA_M1, a_wa[i]);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, a_itqt[i]);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
+ 0x00000109 | (1 << (4 + path)));
+ fail |= _iqk_check_cal(rtwdev, path);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP, B_KIP_IQP_IQSW, a_itqt[i]);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
+ 0x00000309 | (1 << (4 + path)));
+ fail |= _iqk_check_cal(rtwdev, path);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK, B_IQK_RFC_ON, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S0, i = %x, 0x8[19:15] = 0x%x,0x8[09:05] = 0x%x\n", i,
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0xf8000),
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_DTXLOK, 0x003e0));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S0, i = %x, 0x9[19:16] = 0x%x,0x9[09:06] = 0x%x\n", i,
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0xf0000),
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV2, 0x003c0));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S0, i = %x, 0x58 = %x\n", i,
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_TXMO, RFREG_MASK));
+ }
+
+ return fail;
+}
+
+static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RTW89_BAND_2G\n");
+ rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_txk_2ghz_defs_tbl);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RTW89_BAND_5G\n");
+ rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_txk_5ghz_defs_tbl);
+ break;
+ default:
+ break;
+ }
+}
+
+#define IQK_LOK_RETRY 1
+
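+/*
+ * Per-path IQK sequence: retry LOK up to IQK_LOK_RETRY times, run TX
+ * IQK in narrowband or group mode, switch the RX clock, reset the ADC
+ * FIFO, then run RX IQK in the matching mode for the current band.
+ */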
+static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool lok_is_fail;
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ for (i = 0; i < IQK_LOK_RETRY; i++) {
+ _iqk_txk_setting(rtwdev, path);
+ if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
+ lok_is_fail = _iqk_2g_lok(rtwdev, phy_idx, path);
+ else
+ lok_is_fail = _iqk_5g_lok(rtwdev, phy_idx, path);
+
+ if (!lok_is_fail)
+ break;
+ }
+
+ if (iqk_info->is_nbiqk) {
+ if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
+ iqk_info->iqk_tx_fail[0][path] =
+ _iqk_2g_nbtxk(rtwdev, phy_idx, path);
+ else
+ iqk_info->iqk_tx_fail[0][path] =
+ _iqk_5g_nbtxk(rtwdev, phy_idx, path);
+ } else {
+ if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
+ iqk_info->iqk_tx_fail[0][path] =
+ _txk_2g_group_sel(rtwdev, phy_idx, path);
+ else
+ iqk_info->iqk_tx_fail[0][path] =
+ _txk_5g_group_sel(rtwdev, phy_idx, path);
+ }
+
+ _iqk_rxclk_setting(rtwdev, path);
+ _iqk_rxk_setting(rtwdev, path);
+ _adc_fifo_rst(rtwdev, phy_idx, path);
+
+ if (iqk_info->is_nbiqk) {
+ if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
+ iqk_info->iqk_rx_fail[0][path] =
+ _iqk_2g_nbrxk(rtwdev, phy_idx, path);
+ else
+ iqk_info->iqk_rx_fail[0][path] =
+ _iqk_5g_nbrxk(rtwdev, phy_idx, path);
+ } else {
+ if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
+ iqk_info->iqk_rx_fail[0][path] =
+ _rxk_2g_group_sel(rtwdev, phy_idx, path);
+ else
+ iqk_info->iqk_rx_fail[0][path] =
+ _rxk_5g_group_sel(rtwdev, phy_idx, path);
+ }
+}
+
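+/*
+ * Save and restore the BB and RF registers that the calibration flow
+ * touches, so a completed or failed run leaves the radio configuration
+ * as it was found.
+ */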
+static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev,
+ u32 backup_bb_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
+ backup_bb_reg_val[i] =
+ rtw89_phy_read32_mask(rtwdev, rtw8851b_backup_bb_regs[i],
+ MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]backup bb reg : %x, value =%x\n",
+ rtw8851b_backup_bb_regs[i], backup_bb_reg_val[i]);
+ }
+}
+
+static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev,
+ u32 backup_rf_reg_val[], u8 rf_path)
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
+ backup_rf_reg_val[i] =
+ rtw89_read_rf(rtwdev, rf_path,
+ rtw8851b_backup_rf_regs[i], RFREG_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]backup rf S%d reg : %x, value =%x\n", rf_path,
+ rtw8851b_backup_rf_regs[i], backup_rf_reg_val[i]);
+ }
+}
+
+static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev,
+ const u32 backup_bb_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, rtw8851b_backup_bb_regs[i],
+ MASKDWORD, backup_bb_reg_val[i]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]restore bb reg : %x, value =%x\n",
+ rtw8851b_backup_bb_regs[i], backup_bb_reg_val[i]);
+ }
+}
+
+static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
+ const u32 backup_rf_reg_val[], u8 rf_path)
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
+ rtw89_write_rf(rtwdev, rf_path, rtw8851b_backup_rf_regs[i],
+ RFREG_MASK, backup_rf_reg_val[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path,
+ rtw8851b_backup_rf_regs[i], backup_rf_reg_val[i]);
+ }
+}
+
+static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ u8 path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 idx = 0;
+
+ iqk_info->iqk_band[path] = chan->band_type;
+ iqk_info->iqk_bw[path] = chan->band_width;
+ iqk_info->iqk_ch[path] = chan->channel;
+ iqk_info->iqk_table_idx[path] = idx;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n",
+ path, phy, rtwdev->dbcc_en ? "on" : "off",
+ iqk_info->iqk_band[path] == 0 ? "2G" :
+ iqk_info->iqk_band[path] == 1 ? "5G" : "6G",
+ iqk_info->iqk_ch[path],
+ iqk_info->iqk_bw[path] == 0 ? "20M" :
+ iqk_info->iqk_bw[path] == 1 ? "40M" : "80M");
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n",
+ iqk_info->iqk_times, idx);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, iqk_info->syn1to2= 0x%x\n",
+ path, iqk_info->syn1to2);
+}
+
+static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ _iqk_by_path(rtwdev, phy_idx, path);
+}
+
+static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
+{
+ bool fail;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00001219);
+ fsleep(10);
+ fail = _iqk_check_cal(rtwdev, path);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] restore fail=%d\n", fail);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
+}
+
+static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_afebb_restore_defs_tbl);
+}
+
+static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
+}
+
+static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ rtw89_rfk_parser(rtwdev, &rtw8851b_iqk_macbb_defs_tbl);
+}
+
+static void _iqk_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 idx, path;
+
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0);
+
+ if (iqk_info->is_iqk_init)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ iqk_info->is_iqk_init = true;
+ iqk_info->is_nbiqk = false;
+ iqk_info->iqk_fft_en = false;
+ iqk_info->iqk_sram_en = false;
+ iqk_info->iqk_cfir_en = false;
+ iqk_info->iqk_xym_en = false;
+ iqk_info->thermal_rek_en = false;
+ iqk_info->iqk_times = 0x0;
+
+ for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
+ iqk_info->iqk_channel[idx] = 0x0;
+ for (path = 0; path < RF_PATH_NUM_8851B; path++) {
+ iqk_info->lok_cor_fail[idx][path] = false;
+ iqk_info->lok_fin_fail[idx][path] = false;
+ iqk_info->iqk_tx_fail[idx][path] = false;
+ iqk_info->iqk_rx_fail[idx][path] = false;
+ iqk_info->iqk_table_idx[path] = 0x0;
+ }
+ }
+}
+
+static void _doiqk(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ u32 backup_rf_val[RTW8851B_IQK_SS][BACKUP_RF_REGS_NR];
+ u32 backup_bb_val[BACKUP_BB_REGS_NR];
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK,
+ BTC_WRFK_ONESHOT_START);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]==========IQK strat!!!!!==========\n");
+ iqk_info->iqk_times++;
+ iqk_info->kcount = 0;
+ iqk_info->version = RTW8851B_IQK_VER;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
+ _iqk_get_ch_info(rtwdev, phy_idx, path);
+
+ _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
+ _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
+ _iqk_macbb_setting(rtwdev, phy_idx, path);
+ _iqk_preset(rtwdev, path);
+ _iqk_start_iqk(rtwdev, phy_idx, path);
+ _iqk_restore(rtwdev, path);
+ _iqk_afebb_restore(rtwdev, phy_idx, path);
+ _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
+ _rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK,
+ BTC_WRFK_ONESHOT_STOP);
+}
+
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+{
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+}
+
+static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ u32 rf_reg5;
+ u32 rck_val;
+ u32 val;
+ int ret;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);
+
+ rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n",
+ rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
+
+ /* RCK trigger */
+ rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30,
+ false, rtwdev, path, RR_RCKS, BIT(3));
+
+ rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n",
+ rck_val, ret);
+
+ rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK));
+}
+
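+/*
+ * AACK: retrigger the automatic amplitude calibration up to four times
+ * until all four VCO bank bias readbacks (ib[]) come back non-zero.
+ */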
+void rtw8851b_aack(struct rtw89_dev *rtwdev)
+{
+ u32 tmp05, ib[4];
+ u32 tmp;
+ int ret;
+ int rek;
+ int i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]DO AACK\n");
+
+ tmp05 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x0);
+
+ for (rek = 0; rek < 4; rek++) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_AACK, RFREG_MASK, 0x8201e);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_AACK, RFREG_MASK, 0x8201f);
+ fsleep(100);
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp,
+ 1, 1000, false,
+ rtwdev, RF_PATH_A, 0xd0, BIT(16));
+ if (ret)
+ rtw89_warn(rtwdev, "[LCK]AACK timeout\n");
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_VCI, RR_VCI_ON, 0x1);
+ for (i = 0; i < 4; i++) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_VCO, RR_VCO_SEL, i);
+ ib[i] = rtw89_read_rf(rtwdev, RF_PATH_A, RR_IBD, RR_IBD_VAL);
+ }
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_VCI, RR_VCI_ON, 0x0);
+
+ if (ib[0] != 0 && ib[1] != 0 && ib[2] != 0 && ib[3] != 0)
+ break;
+ }
+
+ if (rek != 0)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]AACK rek = %d\n", rek);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, tmp05);
+}
+
+void rtw8851b_rck(struct rtw89_dev *rtwdev)
+{
+ _rck(rtwdev, RF_PATH_A);
+}
+
+void rtw8851b_dack(struct rtw89_dev *rtwdev)
+{
+ _dac_cal(rtwdev, false);
+}
+
+void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u32 tx_en;
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+ _iqk_init(rtwdev);
+ _iqk(rtwdev, phy_idx, false);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
+}
+
+static void _bw_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ enum rtw89_bandwidth bw, bool dav)
+{
+ u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
+ u32 rf_reg18;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);
+
+ rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
+ if (rf_reg18 == INV_RF_DATA) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]Invalid RF_0x18 for Path-%d\n", path);
+ return;
+ }
+ rf_reg18 &= ~RR_CFGCH_BW;
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_20M);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_40M);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BW, CFGCH_BW_80M);
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]Fail to set CH\n");
+ }
+
+ rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
+ RR_CFGCH_BW2) & RFREG_MASK;
+ rf_reg18 |= RR_CFGCH_BW2;
+ rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set %x at path%d, %x =0x%x\n",
+ bw, path, reg18_addr,
+ rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
+}
+
+static void _ctrl_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_bandwidth bw)
+{
+ _bw_setting(rtwdev, RF_PATH_A, bw, true);
+ _bw_setting(rtwdev, RF_PATH_A, bw, false);
+}
+
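+/*
+ * Program the S0 channel word (RF 0x18) with the synthesizer LDO
+ * forced on and poll the LPF busy bit for PLL settling; returns true
+ * if the poll timed out.
+ */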
+static bool _set_s0_arfc18(struct rtw89_dev *rtwdev, u32 val)
+{
+ u32 bak;
+ u32 tmp;
+ int ret;
+
+ bak = rtw89_read_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RR_LDO_SEL, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK, val);
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, tmp, tmp == 0, 1, 1000,
+ false, rtwdev, RF_PATH_A, RR_LPF, RR_LPF_BUSY);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]LCK timeout\n");
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LDO, RFREG_MASK, bak);
+
+ return !!ret;
+}
+
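+/*
+ * Escalating synthesizer lock recovery: if SYN_FB still reports
+ * unlocked, reset the MMD divider, then rewrite RF 0x18, and as a
+ * last resort power-cycle the synthesizer before rewriting 0x18 again.
+ */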
+static void _lck_check(struct rtw89_dev *rtwdev)
+{
+ u32 tmp;
+
+ if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN MMD reset\n");
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_SYN, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MMD, RR_MMD_RST_EN, 0x0);
+ }
+
+ udelay(10);
+
+ if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]re-set RF 0x18\n");
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ _set_s0_arfc18(rtwdev, tmp);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
+ }
+
+ if (rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RR_SYNFB_LK) == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]SYN off/on\n");
+
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RFREG_MASK, tmp);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_SX, RFREG_MASK, tmp);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_SYNLUT, RR_SYNLUT_MOD, 0x0);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x1);
+ tmp = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ _set_s0_arfc18(rtwdev, tmp);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LCK_TRG, RR_LCK_TRGSEL, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[LCK]0xb2=%x, 0xc5=%x\n",
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_VCO, RFREG_MASK),
+ rtw89_read_rf(rtwdev, RF_PATH_A, RR_SYNFB, RFREG_MASK));
+ }
+}
+
+static void _set_ch(struct rtw89_dev *rtwdev, u32 val)
+{
+ bool timeout;
+
+ timeout = _set_s0_arfc18(rtwdev, val);
+ if (!timeout)
+ _lck_check(rtwdev);
+}
+
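+/*
+ * Build the RF 0x18 channel word: set the channel index, select the
+ * 5G band fields above channel 14, and route path A's DAV copy through
+ * _set_ch(), which adds the PLL lock handling.
+ */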
+static void _ch_setting(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ u8 central_ch, bool dav)
+{
+ u32 reg18_addr = dav ? RR_CFGCH : RR_CFGCH_V1;
+ bool is_2g_ch = central_ch <= 14;
+ u32 rf_reg18;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]===> %s\n", __func__);
+
+ rf_reg18 = rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK);
+ rf_reg18 &= ~(RR_CFGCH_BAND1 | RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH |
+ RR_CFGCH_BCN | RR_CFGCH_BAND0 | RR_CFGCH_CH);
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_CH, central_ch);
+
+ if (!is_2g_ch)
+ rf_reg18 |= FIELD_PREP(RR_CFGCH_BAND1, CFGCH_BAND1_5G) |
+ FIELD_PREP(RR_CFGCH_BAND0, CFGCH_BAND0_5G);
+
+ rf_reg18 &= ~(RR_CFGCH_POW_LCK | RR_CFGCH_TRX_AH | RR_CFGCH_BCN |
+ RR_CFGCH_BW2) & RFREG_MASK;
+ rf_reg18 |= RR_CFGCH_BW2;
+
+ if (path == RF_PATH_A && dav)
+ _set_ch(rtwdev, rf_reg18);
+ else
+ rtw89_write_rf(rtwdev, path, reg18_addr, RFREG_MASK, rf_reg18);
+
+ rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 0);
+ rtw89_write_rf(rtwdev, path, RR_LCKST, RR_LCKST_BIN, 1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]CH: %d for Path-%d, reg0x%x = 0x%x\n",
+ central_ch, path, reg18_addr,
+ rtw89_read_rf(rtwdev, path, reg18_addr, RFREG_MASK));
+}
+
+static void _ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch)
+{
+ _ch_setting(rtwdev, RF_PATH_A, central_ch, true);
+ _ch_setting(rtwdev, RF_PATH_A, central_ch, false);
+}
+
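+/*
+ * Select the RX baseband LPF corner through the LUT write port; wider
+ * channels use a smaller RR_LUTWD0_LB code.
+ */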
+static void _set_rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
+ enum rtw89_rf_path path)
+{
+ rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M2, 0x12);
+
+ if (bw == RTW89_CHANNEL_WIDTH_20)
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x1b);
+ else if (bw == RTW89_CHANNEL_WIDTH_40)
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x13);
+ else if (bw == RTW89_CHANNEL_WIDTH_80)
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0xb);
+ else
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB, 0x3);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK] set S%d RXBB BW 0x3F = 0x%x\n", path,
+ rtw89_read_rf(rtwdev, path, RR_LUTWD0, RR_LUTWD0_LB));
+
+ rtw89_write_rf(rtwdev, path, RR_LUTWE2, RR_LUTWE2_RTXBW, 0x0);
+}
+
+static void _rxbb_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_bandwidth bw)
+{
+ u8 kpath, path;
+
+ kpath = _kpath(rtwdev, phy);
+
+ for (path = 0; path < RF_PATH_NUM_8851B; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ _set_rxbb_bw(rtwdev, bw, path);
+ }
+}
+
+static void rtw8851b_ctrl_bw_ch(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, u8 central_ch,
+ enum rtw89_band band, enum rtw89_bandwidth bw)
+{
+ _ctrl_ch(rtwdev, central_ch);
+ _ctrl_bw(rtwdev, phy, bw);
+ _rxbb_bw(rtwdev, phy, bw);
+}
+
+void rtw8851b_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8851b_ctrl_bw_ch(rtwdev, phy_idx, chan->channel, chan->band_type,
+ chan->band_width);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h
new file mode 100644
index 000000000000..d86c630ff47e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2022-2023 Realtek Corporation
+ */
+
+#ifndef __RTW89_8851B_RFK_H__
+#define __RTW89_8851B_RFK_H__
+
+#include "core.h"
+
+void rtw8851b_aack(struct rtw89_dev *rtwdev);
+void rtw8851b_rck(struct rtw89_dev *rtwdev);
+void rtw8851b_dack(struct rtw89_dev *rtwdev);
+void rtw8851b_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8851b_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
new file mode 100644
index 000000000000..0f7711c50bd1
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2022-2023 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "reg.h"
+#include "rtw8851b.h"
+
+static const struct rtw89_pci_info rtw8851b_pci_info = {
+ .txbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_trunc_mode = MAC_AX_BD_TRUNC,
+ .rxbd_mode = MAC_AX_RXBD_PKT,
+ .tag_mode = MAC_AX_TAG_MULTI,
+ .tx_burst = MAC_AX_TX_BURST_2048B,
+ .rx_burst = MAC_AX_RX_BURST_128B,
+ .wd_dma_idle_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .wd_dma_act_intvl = MAC_AX_WD_DMA_INTVL_256NS,
+ .multi_tag_num = MAC_AX_TAG_NUM_8,
+ .lbc_en = MAC_AX_PCIE_ENABLE,
+ .lbc_tmr = MAC_AX_LBC_TMR_2MS,
+ .autok_en = MAC_AX_PCIE_DISABLE,
+ .io_rcy_en = MAC_AX_PCIE_DISABLE,
+ .io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
+
+ .init_cfg_reg = R_AX_PCIE_INIT_CFG1,
+ .txhci_en_bit = B_AX_TXHCI_EN,
+ .rxhci_en_bit = B_AX_RXHCI_EN,
+ .rxbd_mode_bit = B_AX_RXBD_MODE,
+ .exp_ctrl_reg = R_AX_PCIE_EXP_CTRL,
+ .max_tag_num_mask = B_AX_MAX_TAG_NUM,
+ .rxbd_rwptr_clr_reg = R_AX_RXBD_RWPTR_CLR,
+ .txbd_rwptr_clr2_reg = 0,
+ .dma_stop1 = {R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_MASK_V1},
+ .dma_stop2 = {0},
+ .dma_busy1 = {R_AX_PCIE_DMA_BUSY1, DMA_BUSY1_CHECK_V1},
+ .dma_busy2_reg = 0,
+ .dma_busy3_reg = R_AX_PCIE_DMA_BUSY1,
+
+ .rpwm_addr = R_AX_PCIE_HRPWM,
+ .cpwm_addr = R_AX_CPWM,
+ .tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) |
+ BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) |
+ BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11),
+ .bd_idx_addr_low_power = NULL,
+ .dma_addr_set = &rtw89_pci_ch_dma_addr_set,
+ .bd_ram_table = &rtw89_bd_ram_table_single,
+
+ .ltr_set = rtw89_pci_ltr_set,
+ .fill_txaddr_info = rtw89_pci_fill_txaddr_info,
+ .config_intr_mask = rtw89_pci_config_intr_mask,
+ .enable_intr = rtw89_pci_enable_intr,
+ .disable_intr = rtw89_pci_disable_intr,
+ .recognize_intrs = rtw89_pci_recognize_intrs,
+};
+
+static const struct rtw89_driver_info rtw89_8851be_info = {
+ .chip = &rtw8851b_chip_info,
+ .bus = {
+ .pci = &rtw8851b_pci_info,
+ },
+};
+
+static const struct pci_device_id rtw89_8851be_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xb851),
+ .driver_data = (kernel_ulong_t)&rtw89_8851be_info,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, rtw89_8851be_id_table);
+
+static struct pci_driver rtw89_8851be_driver = {
+ .name = "rtw89_8851be",
+ .id_table = rtw89_8851be_id_table,
+ .probe = rtw89_pci_probe,
+ .remove = rtw89_pci_remove,
+ .driver.pm = &rtw89_pm_ops,
+};
+module_pci_driver(rtw89_8851be_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8851BE driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index d7930efd89b7..a8a58ff36e95 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -463,6 +463,12 @@ static const struct rtw89_imr_info rtw8852a_imr_info = {
.tmac_imr_set = B_AX_TMAC_IMR_SET,
};
+static const struct rtw89_xtal_info rtw8852a_xtal_info = {
+ .xcap_reg = R_AX_XTAL_ON_CTRL0,
+ .sc_xo_mask = B_AX_XTAL_SC_XO_MASK,
+ .sc_xi_mask = B_AX_XTAL_SC_XI_MASK,
+};
+
static const struct rtw89_rrsr_cfgs rtw8852a_rrsr_cfgs = {
.ref_rate = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_REF_RATE_SEL, 0},
.rsc = {R_AX_TRXPTCL_RRSR_CTL_0, B_AX_WMAC_RESP_RSC_MASK, 2},
@@ -2069,6 +2075,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.fw_format_max = RTW8852A_FW_FORMAT_MAX,
.try_ce_fw = false,
.fifo_size = 458752,
+ .small_fifo_size = false,
.dle_scc_rsvd_size = 0,
.max_amsdu_limit = 3500,
.dis_2g_40m_ul_ofdma = true,
@@ -2085,6 +2092,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.rf_table = {&rtw89_8852a_phy_radioa_table,
&rtw89_8852a_phy_radiob_table,},
.nctl_table = &rtw89_8852a_phy_nctl_table,
+ .nctl_post_table = NULL,
.byr_table = &rtw89_8852a_byr_table,
.dflt_parms = &rtw89_8852a_dflt_parms,
.rfe_parms_conf = NULL,
@@ -2107,7 +2115,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.scam_num = 128,
.bacam_num = 2,
.bacam_dynamic_num = 4,
- .bacam_v1 = false,
+ .bacam_ver = RTW89_BACAM_V0,
.sec_ctrl_efuse_size = 4,
.physical_efuse_size = 1216,
.logical_efuse_size = 1536,
@@ -2159,6 +2167,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8852a,
#endif
+ .xtal_info = &rtw8852a_xtal_info,
};
EXPORT_SYMBOL(rtw8852a_chip_info);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index eaa2ea0586bc..fa12d4a7f79f 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -2506,6 +2506,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.fw_format_max = RTW8852B_FW_FORMAT_MAX,
.try_ce_fw = true,
.fifo_size = 196608,
+ .small_fifo_size = true,
.dle_scc_rsvd_size = 98304,
.max_amsdu_limit = 3500,
.dis_2g_40m_ul_ofdma = true,
@@ -2522,6 +2523,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.rf_table = {&rtw89_8852b_phy_radioa_table,
&rtw89_8852b_phy_radiob_table,},
.nctl_table = &rtw89_8852b_phy_nctl_table,
+ .nctl_post_table = NULL,
.byr_table = &rtw89_8852b_byr_table,
.dflt_parms = &rtw89_8852b_dflt_parms,
.rfe_parms_conf = NULL,
@@ -2544,7 +2546,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.scam_num = 128,
.bacam_num = 2,
.bacam_dynamic_num = 4,
- .bacam_v1 = false,
+ .bacam_ver = RTW89_BACAM_V0,
.sec_ctrl_efuse_size = 4,
.physical_efuse_size = 1216,
.logical_efuse_size = 2048,
@@ -2598,6 +2600,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8852b,
#endif
+ .xtal_info = NULL,
};
EXPORT_SYMBOL(rtw8852b_chip_info);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index ceb819a62efc..d9272bce0325 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -2805,6 +2805,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.fw_format_max = RTW8852C_FW_FORMAT_MAX,
.try_ce_fw = false,
.fifo_size = 458752,
+ .small_fifo_size = false,
.dle_scc_rsvd_size = 0,
.max_amsdu_limit = 8000,
.dis_2g_40m_ul_ofdma = false,
@@ -2821,6 +2822,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.rf_table = {&rtw89_8852c_phy_radiob_table,
&rtw89_8852c_phy_radioa_table,},
.nctl_table = &rtw89_8852c_phy_nctl_table,
+ .nctl_post_table = NULL,
.byr_table = &rtw89_8852c_byr_table,
.dflt_parms = &rtw89_8852c_dflt_parms,
.rfe_parms_conf = NULL,
@@ -2844,7 +2846,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.scam_num = 128,
.bacam_num = 8,
.bacam_dynamic_num = 8,
- .bacam_v1 = true,
+ .bacam_ver = RTW89_BACAM_V0_EXT,
.sec_ctrl_efuse_size = 4,
.physical_efuse_size = 1216,
.logical_efuse_size = 2048,
@@ -2897,6 +2899,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8852c,
#endif
+ .xtal_info = NULL,
};
EXPORT_SYMBOL(rtw8852c_chip_info);
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index 98eb9607cd21..d880ecb879ca 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -298,12 +298,19 @@
le32_get_bits(*((const __le32 *)ie), GENMASK(4, 0))
#define RTW89_GET_PHY_STS_IE_LEN(ie) \
le32_get_bits(*((const __le32 *)ie), GENMASK(11, 5))
-#define RTW89_GET_PHY_STS_IE01_CH_IDX(ie) \
- le32_get_bits(*((const __le32 *)ie), GENMASK(23, 16))
-#define RTW89_GET_PHY_STS_IE01_FD_CFO(ie) \
- le32_get_bits(*((const __le32 *)(ie) + 1), GENMASK(19, 8))
-#define RTW89_GET_PHY_STS_IE01_PREMB_CFO(ie) \
- le32_get_bits(*((const __le32 *)(ie) + 1), GENMASK(31, 20))
+
+struct rtw89_phy_sts_ie0 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+} __packed;
+
+#define RTW89_PHY_STS_IE01_W0_CH_IDX GENMASK(23, 16)
+#define RTW89_PHY_STS_IE01_W1_FD_CFO GENMASK(19, 8)
+#define RTW89_PHY_STS_IE01_W1_PREMB_CFO GENMASK(31, 20)
+#define RTW89_PHY_STS_IE01_W2_AVG_SNR GENMASK(5, 0)
+#define RTW89_PHY_STS_IE01_W2_EVM_MAX GENMASK(15, 8)
+#define RTW89_PHY_STS_IE01_W2_EVM_MIN GENMASK(23, 16)
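+/* Callers are expected to read these with le32_get_bits() on the
+ * matching word, e.g. le32_get_bits(ie->w1, RTW89_PHY_STS_IE01_W1_FD_CFO).
+ */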
enum rtw89_tx_channel {
RTW89_TXCH_ACH0 = 0,
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 2ca8abb70f11..364e54622150 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -91,7 +91,7 @@ static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev)
u32 wow_reason_reg;
u8 reason;
- if (chip_id == RTL8852A || chip_id == RTL8852B)
+ if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
wow_reason_reg = R_AX_C2HREG_DATA3 + 3;
else
wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3;
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb2.c b/drivers/phy/amlogic/phy-meson-g12a-usb2.c
index 9d1efa0d9394..ec2555bb83d5 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb2.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb2.c
@@ -172,10 +172,16 @@ static int phy_meson_g12a_usb2_init(struct phy *phy)
int ret;
unsigned int value;
- ret = reset_control_reset(priv->reset);
+ ret = clk_prepare_enable(priv->clk);
if (ret)
return ret;
+ ret = reset_control_reset(priv->reset);
+ if (ret) {
+ clk_disable_unprepare(priv->clk);
+ return ret;
+ }
+
udelay(RESET_COMPLETE_TIME);
/* usb2_otg_aca_en == 0 */
@@ -277,8 +283,13 @@ static int phy_meson_g12a_usb2_init(struct phy *phy)
static int phy_meson_g12a_usb2_exit(struct phy *phy)
{
struct phy_meson_g12a_usb2_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = reset_control_reset(priv->reset);
+ if (!ret)
+ clk_disable_unprepare(priv->clk);
- return reset_control_reset(priv->reset);
+ return ret;
}
/* set_mode is not needed, mode setting is handled via the UTMI bus */
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
index 3831f596d50c..3b0e4daa9de9 100644
--- a/drivers/phy/cadence/phy-cadence-torrent.c
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -38,6 +38,9 @@
#define POLL_TIMEOUT_US 5000
#define PLL_LOCK_TIMEOUT 100000
+#define DP_PLL0 BIT(0)
+#define DP_PLL1 BIT(1)
+
#define TORRENT_COMMON_CDB_OFFSET 0x0
#define TORRENT_TX_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
@@ -66,16 +69,11 @@
*/
#define PHY_AUX_CTRL 0x04
#define PHY_RESET 0x20
-#define PMA_TX_ELEC_IDLE_MASK 0xF0U
#define PMA_TX_ELEC_IDLE_SHIFT 4
-#define PHY_L00_RESET_N_MASK 0x01U
#define PHY_PMA_XCVR_PLLCLK_EN 0x24
#define PHY_PMA_XCVR_PLLCLK_EN_ACK 0x28
#define PHY_PMA_XCVR_POWER_STATE_REQ 0x2c
-#define PHY_POWER_STATE_LN_0 0x0000
-#define PHY_POWER_STATE_LN_1 0x0008
-#define PHY_POWER_STATE_LN_2 0x0010
-#define PHY_POWER_STATE_LN_3 0x0018
+#define PHY_POWER_STATE_LN(ln) ((ln) * 8)
#define PMA_XCVR_POWER_STATE_REQ_LN_MASK 0x3FU
#define PHY_PMA_XCVR_POWER_STATE_ACK 0x30
#define PHY_PMA_CMN_READY 0x34
@@ -323,6 +321,7 @@ struct cdns_torrent_phy {
void __iomem *base; /* DPTX registers base */
void __iomem *sd_base; /* SD0801 registers base */
u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */
+ u32 dp_pll;
struct reset_control *phy_rst;
struct reset_control *apb_rst;
struct device *dev;
@@ -905,88 +904,129 @@ void cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(struct cdns_torrent_phy *cdns_phy,
/* Setting VCO for 10.8GHz */
case 2700:
case 5400:
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0028);
- cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_FBH_OVRD_M0, 0x0022);
- cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_FBH_OVRD_M0, 0x0022);
- cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_FBL_OVRD_M0, 0x000C);
+ if (cdns_phy->dp_pll & DP_PLL0)
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_FBH_OVRD_M0, 0x0022);
+
+ if (cdns_phy->dp_pll & DP_PLL1) {
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0028);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_FBH_OVRD_M0, 0x0022);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_FBL_OVRD_M0, 0x000C);
+ }
break;
/* Setting VCO for 9.72GHz */
case 1620:
case 2430:
case 3240:
- cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
- cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0061);
- cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0061);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x3333);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x3333);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0042);
- cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0042);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ if (cdns_phy->dp_pll & DP_PLL0) {
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0061);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x3333);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0042);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ }
+ if (cdns_phy->dp_pll & DP_PLL1) {
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0061);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x3333);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0042);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ }
break;
/* Setting VCO for 8.64GHz */
case 2160:
case 4320:
- cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
- cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0056);
- cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0056);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x6666);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x6666);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x003A);
- cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x003A);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ if (cdns_phy->dp_pll & DP_PLL0) {
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0056);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVL_M0, 0x6666);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x003A);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ }
+ if (cdns_phy->dp_pll & DP_PLL1) {
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0056);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVL_M0, 0x6666);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x003A);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ }
break;
/* Setting VCO for 8.1GHz */
case 8100:
- cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
- cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0051);
- cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0051);
- cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0036);
- cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0036);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
- cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ if (cdns_phy->dp_pll & DP_PLL0) {
+ cdns_torrent_phy_write(regmap, CMN_PLL0_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_INTDIV_M0, 0x0051);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL0_HIGH_THR_M0, 0x0036);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL0_CTRL_M0, 0x0002);
+ }
+ if (cdns_phy->dp_pll & DP_PLL1) {
+ cdns_torrent_phy_write(regmap, CMN_PLL1_DSM_DIAG_M0, 0x0004);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_PADJ_M0, 0x0509);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CP_IADJ_M0, 0x0F00);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_FILT_PADJ_M0, 0x0F08);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_INTDIV_M0, 0x0051);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_FRACDIVH_M0, 0x0002);
+ cdns_torrent_phy_write(regmap, CMN_PLL1_HIGH_THR_M0, 0x0036);
+ cdns_torrent_phy_write(regmap, CMN_PDIAG_PLL1_CTRL_M0, 0x0002);
+ }
break;
}
}
+/* Set PLL used for DP configuration */
+static int cdns_torrent_dp_get_pll(struct cdns_torrent_phy *cdns_phy,
+ enum cdns_torrent_phy_type phy_t2)
+{
+ switch (phy_t2) {
+ case TYPE_PCIE:
+ case TYPE_USB:
+ cdns_phy->dp_pll = DP_PLL1;
+ break;
+ case TYPE_SGMII:
+ case TYPE_QSGMII:
+ cdns_phy->dp_pll = DP_PLL0;
+ break;
+ case TYPE_NONE:
+ cdns_phy->dp_pll = DP_PLL0 | DP_PLL1;
+ break;
+ default:
+ dev_err(cdns_phy->dev, "Unsupported PHY configuration\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
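
cdns_torrent_dp_get_pll() records which common PLL(s) the DP link may
program, keyed by the companion protocol sharing the PHY. A standalone
sketch of that mapping (the enum below is illustrative, not the driver's
actual ordering, and the "why" comments are an assumption, since the
driver only encodes the result):

#include <stdio.h>

#define DP_PLL0	(1U << 0)
#define DP_PLL1	(1U << 1)

enum phy_type { TYPE_NONE, TYPE_PCIE, TYPE_USB, TYPE_SGMII, TYPE_QSGMII };

static unsigned int dp_pll_for(enum phy_type companion)
{
	switch (companion) {
	case TYPE_PCIE:
	case TYPE_USB:
		return DP_PLL1;	/* companion presumably occupies PLL0 */
	case TYPE_SGMII:
	case TYPE_QSGMII:
		return DP_PLL0;	/* companion presumably occupies PLL1 */
	case TYPE_NONE:
		return DP_PLL0 | DP_PLL1;	/* single-link DP programs both */
	default:
		return 0;	/* unsupported pairing */
	}
}

int main(void)
{
	printf("DP paired with USB -> PLL mask 0x%x\n", dp_pll_for(TYPE_USB));
	return 0;
}
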
/*
* Enable or disable PLL for selected lanes.
*/
static int cdns_torrent_dp_set_pll_en(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp,
bool enable)
{
- u32 rd_val;
- u32 ret;
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
+ u32 rd_val, pll_ack_val;
+ int ret;
/*
* Used to determine which bits to check for or enable in
@@ -996,28 +1036,18 @@ static int cdns_torrent_dp_set_pll_en(struct cdns_torrent_phy *cdns_phy,
/* Used to enable or disable lanes. */
u32 pll_val;
- /* Select values of registers and mask, depending on enabled lane
- * count.
- */
- switch (dp->lanes) {
- /* lane 0 */
- case (1):
- pll_bits = 0x00000001;
- break;
- /* lanes 0-1 */
- case (2):
- pll_bits = 0x00000003;
- break;
- /* lanes 0-3, all */
- default:
- pll_bits = 0x0000000F;
- break;
- }
+ /* Select values of registers and mask, depending on enabled lane count. */
+ pll_val = cdns_torrent_dp_read(regmap, PHY_PMA_XCVR_PLLCLK_EN);
- if (enable)
- pll_val = pll_bits;
- else
- pll_val = 0x00000000;
+ if (enable) {
+ pll_bits = ((1 << dp->lanes) - 1);
+ pll_val |= pll_bits;
+ pll_ack_val = pll_bits;
+ } else {
+ pll_bits = ((1 << inst->num_lanes) - 1);
+ pll_val &= (~pll_bits);
+ pll_ack_val = 0;
+ }
cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, pll_val);
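
The removed switch over dp->lanes collapses into (1 << lanes) - 1. A
quick sketch showing the expression matches the old 1-, 2- and 4-lane
masks:

#include <assert.h>

int main(void)
{
	assert(((1U << 1) - 1) == 0x1);	/* lane 0 */
	assert(((1U << 2) - 1) == 0x3);	/* lanes 0-1 */
	assert(((1U << 4) - 1) == 0xF);	/* lanes 0-3, all */
	return 0;
}
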
@@ -1025,22 +1055,23 @@ static int cdns_torrent_dp_set_pll_en(struct cdns_torrent_phy *cdns_phy,
ret = regmap_read_poll_timeout(regmap,
PHY_PMA_XCVR_PLLCLK_EN_ACK,
rd_val,
- (rd_val & pll_bits) == pll_val,
+ (rd_val & pll_bits) == pll_ack_val,
0, POLL_TIMEOUT_US);
ndelay(100);
return ret;
}
static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
u32 num_lanes,
enum phy_powerstate powerstate)
{
/* Register value for power state for a single byte. */
- u32 value_part;
- u32 value;
- u32 mask;
+ u32 value_part, i;
+ u32 value = 0;
+ u32 mask = 0;
u32 read_val;
- u32 ret;
+ int ret;
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
switch (powerstate) {
@@ -1056,29 +1087,11 @@ static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
break;
}
- /* Select values of registers and mask, depending on enabled
- * lane count.
- */
- switch (num_lanes) {
- /* lane 0 */
- case (1):
- value = value_part;
- mask = 0x0000003FU;
- break;
- /* lanes 0-1 */
- case (2):
- value = (value_part
- | (value_part << 8));
- mask = 0x00003F3FU;
- break;
- /* lanes 0-3, all */
- default:
- value = (value_part
- | (value_part << 8)
- | (value_part << 16)
- | (value_part << 24));
- mask = 0x3F3F3F3FU;
- break;
+ /* Select values of registers and mask, depending on enabled lane count. */
+
+ for (i = 0; i < num_lanes; i++) {
+ value |= (value_part << PHY_POWER_STATE_LN(i));
+ mask |= (PMA_XCVR_POWER_STATE_REQ_LN_MASK << PHY_POWER_STATE_LN(i));
}
/* Set power state A<n>. */
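
The loop above rebuilds the removed per-lane-count value/mask pairs from
PHY_POWER_STATE_LN(). A sketch for the two-lane case (value_part is an
arbitrary power-state code chosen for illustration):

#include <assert.h>

#define PHY_POWER_STATE_LN(ln)			((ln) * 8)
#define PMA_XCVR_POWER_STATE_REQ_LN_MASK	0x3FU

int main(void)
{
	unsigned int value = 0, mask = 0, i;
	unsigned int value_part = 0x2, num_lanes = 2;

	for (i = 0; i < num_lanes; i++) {
		value |= value_part << PHY_POWER_STATE_LN(i);
		mask |= PMA_XCVR_POWER_STATE_REQ_LN_MASK << PHY_POWER_STATE_LN(i);
	}
	/* Matches the removed two-lane case */
	assert(value == (value_part | (value_part << 8)));
	assert(mask == 0x3F3FU);
	return 0;
}
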
@@ -1093,7 +1106,8 @@ static int cdns_torrent_dp_set_power_state(struct cdns_torrent_phy *cdns_phy,
return ret;
}
-static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy, u32 num_lanes)
+static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst, u32 num_lanes)
{
unsigned int read_val;
int ret;
@@ -1114,12 +1128,12 @@ static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy, u32 num_lanes)
ndelay(100);
- ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes,
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, num_lanes,
POWERSTATE_A2);
if (ret)
return ret;
- ret = cdns_torrent_dp_set_power_state(cdns_phy, num_lanes,
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, num_lanes,
POWERSTATE_A0);
return ret;
@@ -1143,6 +1157,7 @@ static int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy)
}
static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
u32 rate, u32 num_lanes)
{
unsigned int clk_sel_val = 0;
@@ -1175,14 +1190,17 @@ static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy,
break;
}
- cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
- CMN_PDIAG_PLL0_CLK_SEL_M0, clk_sel_val);
- cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
- CMN_PDIAG_PLL1_CLK_SEL_M0, clk_sel_val);
+ if (cdns_phy->dp_pll & DP_PLL0)
+ cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
+ CMN_PDIAG_PLL0_CLK_SEL_M0, clk_sel_val);
+
+ if (cdns_phy->dp_pll & DP_PLL1)
+ cdns_torrent_phy_write(cdns_phy->regmap_common_cdb,
+ CMN_PDIAG_PLL1_CLK_SEL_M0, clk_sel_val);
/* PMA lane configuration to deal with multi-link operation */
for (i = 0; i < num_lanes; i++)
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[i],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + i],
XCVR_DIAG_HSCLK_DIV, hsclk_div_val);
}
@@ -1191,23 +1209,44 @@ static void cdns_torrent_dp_pma_cmn_rate(struct cdns_torrent_phy *cdns_phy,
* set and PLL disable request was processed.
*/
static int cdns_torrent_dp_configure_rate(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
- u32 read_val, ret;
+ u32 read_val, field_val;
+ int ret;
- /* Disable the cmn_pll0_en before re-programming the new data rate. */
- regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, 0x0);
+ /*
+ * Disable the associated PLL (cmn_pll0_en or cmn_pll1_en) before
+ * re-programming the new data rate.
+ */
+ ret = regmap_field_read(cdns_phy->phy_pma_pll_raw_ctrl, &field_val);
+ if (ret)
+ return ret;
+ field_val &= ~(cdns_phy->dp_pll);
+ regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, field_val);
/*
* Wait for PLL ready de-assertion.
* For PLL0 - PHY_PMA_CMN_CTRL2[2] == 1
+ * For PLL1 - PHY_PMA_CMN_CTRL2[3] == 1
*/
- ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
- read_val,
- ((read_val >> 2) & 0x01) != 0,
- 0, POLL_TIMEOUT_US);
- if (ret)
- return ret;
+ if (cdns_phy->dp_pll & DP_PLL0) {
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
+ read_val,
+ ((read_val >> 2) & 0x01) != 0,
+ 0, POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+ }
+
+ if ((cdns_phy->dp_pll & DP_PLL1) && cdns_phy->nsubnodes != 1) {
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
+ read_val,
+ ((read_val >> 3) & 0x01) != 0,
+ 0, POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+ }
ndelay(200);
/* DP Rate Change - VCO Output settings. */
@@ -1221,19 +1260,35 @@ static int cdns_torrent_dp_configure_rate(struct cdns_torrent_phy *cdns_phy,
/* PMA common configuration 100MHz */
cdns_torrent_dp_pma_cmn_vco_cfg_100mhz(cdns_phy, dp->link_rate, dp->ssc);
- cdns_torrent_dp_pma_cmn_rate(cdns_phy, dp->link_rate, dp->lanes);
+ cdns_torrent_dp_pma_cmn_rate(cdns_phy, inst, dp->link_rate, dp->lanes);
- /* Enable the cmn_pll0_en. */
- regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, 0x3);
+ /* Enable the associated PLL (cmn_pll0_en or cmn_pll1_en) */
+ ret = regmap_field_read(cdns_phy->phy_pma_pll_raw_ctrl, &field_val);
+ if (ret)
+ return ret;
+ field_val |= cdns_phy->dp_pll;
+ regmap_field_write(cdns_phy->phy_pma_pll_raw_ctrl, field_val);
/*
* Wait for PLL ready assertion.
* For PLL0 - PHY_PMA_CMN_CTRL2[0] == 1
+ * For PLL1 - PHY_PMA_CMN_CTRL2[1] == 1
*/
- ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
- read_val,
- (read_val & 0x01) != 0,
- 0, POLL_TIMEOUT_US);
+ if (cdns_phy->dp_pll & DP_PLL0) {
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
+ read_val,
+ (read_val & 0x01) != 0,
+ 0, POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+ }
+
+ if ((cdns_phy->dp_pll & DP_PLL1) && cdns_phy->nsubnodes != 1)
+ ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_2,
+ read_val,
+ ((read_val >> 1) & 0x01) != 0,
+ 0, POLL_TIMEOUT_US);
+
return ret;
}
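
Because two protocols can now share phy_pma_pll_raw_ctrl, the field is
read-modify-written rather than overwritten with the old 0x0/0x3
constants. A sketch of the bit discipline, assuming DP owns PLL1 and a
companion link owns PLL0:

#include <assert.h>

#define DP_PLL0	(1U << 0)
#define DP_PLL1	(1U << 1)

int main(void)
{
	unsigned int raw_ctrl = DP_PLL0 | DP_PLL1;	/* both PLLs enabled */
	unsigned int dp_pll = DP_PLL1;			/* DP's share */

	raw_ctrl &= ~dp_pll;			/* disable only DP's PLL */
	assert(raw_ctrl == DP_PLL0);		/* companion PLL untouched */

	raw_ctrl |= dp_pll;			/* re-enable after reprogramming */
	assert(raw_ctrl == (DP_PLL0 | DP_PLL1));
	return 0;
}
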
@@ -1301,6 +1356,7 @@ static int cdns_torrent_dp_verify_config(struct cdns_torrent_inst *inst,
/* Set power state A0 and PLL clock enable to 0 on enabled lanes. */
static void cdns_torrent_dp_set_a0_pll(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
u32 num_lanes)
{
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
@@ -1308,27 +1364,13 @@ static void cdns_torrent_dp_set_a0_pll(struct cdns_torrent_phy *cdns_phy,
PHY_PMA_XCVR_POWER_STATE_REQ);
u32 pll_clk_en = cdns_torrent_dp_read(regmap,
PHY_PMA_XCVR_PLLCLK_EN);
+ u32 i;
- /* Lane 0 is always enabled. */
- pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
- PHY_POWER_STATE_LN_0);
- pll_clk_en &= ~0x01U;
-
- if (num_lanes > 1) {
- /* lane 1 */
- pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
- PHY_POWER_STATE_LN_1);
- pll_clk_en &= ~(0x01U << 1);
- }
+ for (i = 0; i < num_lanes; i++) {
+ pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK
+ << PHY_POWER_STATE_LN(inst->mlane + i));
- if (num_lanes > 2) {
- /* lanes 2 and 3 */
- pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
- PHY_POWER_STATE_LN_2);
- pwr_state &= ~(PMA_XCVR_POWER_STATE_REQ_LN_MASK <<
- PHY_POWER_STATE_LN_3);
- pll_clk_en &= ~(0x01U << 2);
- pll_clk_en &= ~(0x01U << 3);
+ pll_clk_en &= ~(0x01U << (inst->mlane + i));
}
cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_POWER_STATE_REQ, pwr_state);
@@ -1337,36 +1379,57 @@ static void cdns_torrent_dp_set_a0_pll(struct cdns_torrent_phy *cdns_phy,
/* Configure lane count as required. */
static int cdns_torrent_dp_set_lanes(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
- u32 value;
- u32 ret;
+ u32 value, i;
+ int ret;
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
u8 lane_mask = (1 << dp->lanes) - 1;
+ u8 pma_tx_elec_idle_mask = 0;
+ u32 clane = inst->mlane;
+
+ lane_mask <<= clane;
value = cdns_torrent_dp_read(regmap, PHY_RESET);
/* clear pma_tx_elec_idle_ln_* bits. */
- value &= ~PMA_TX_ELEC_IDLE_MASK;
+ pma_tx_elec_idle_mask = ((1 << inst->num_lanes) - 1) << clane;
+
+ pma_tx_elec_idle_mask <<= PMA_TX_ELEC_IDLE_SHIFT;
+
+ value &= ~pma_tx_elec_idle_mask;
+
/* Assert pma_tx_elec_idle_ln_* for disabled lanes. */
value |= ((~lane_mask) << PMA_TX_ELEC_IDLE_SHIFT) &
- PMA_TX_ELEC_IDLE_MASK;
+ pma_tx_elec_idle_mask;
+
cdns_torrent_dp_write(regmap, PHY_RESET, value);
- /* reset the link by asserting phy_l00_reset_n low */
+ /* reset the link by asserting master lane phy_l0*_reset_n low */
cdns_torrent_dp_write(regmap, PHY_RESET,
- value & (~PHY_L00_RESET_N_MASK));
+ value & (~(1 << clane)));
/*
- * Assert lane reset on unused lanes and lane 0 so they remain in reset
+ * Assert lane reset on unused lanes and the master lane so they remain in reset
* and powered down when re-enabling the link
*/
- value = (value & 0x0000FFF0) | (0x0000000E & lane_mask);
+ for (i = 0; i < inst->num_lanes; i++)
+ value &= (~(1 << (clane + i)));
+
+ for (i = 1; i < inst->num_lanes; i++)
+ value |= ((1 << (clane + i)) & lane_mask);
+
cdns_torrent_dp_write(regmap, PHY_RESET, value);
- cdns_torrent_dp_set_a0_pll(cdns_phy, dp->lanes);
+ cdns_torrent_dp_set_a0_pll(cdns_phy, inst, dp->lanes);
/* release phy_l0*_reset_n based on used laneCount */
- value = (value & 0x0000FFF0) | (0x0000000F & lane_mask);
+ for (i = 0; i < inst->num_lanes; i++)
+ value &= (~(1 << (clane + i)));
+
+ for (i = 0; i < inst->num_lanes; i++)
+ value |= ((1 << (clane + i)) & lane_mask);
+
cdns_torrent_dp_write(regmap, PHY_RESET, value);
/* Wait until the PHY gets ready after releasing the PHY reset signal. */
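
For multilink operation every lane mask in the function above is shifted
by the instance's master lane (inst->mlane). A sketch with a hypothetical
DP instance owning lanes 2-3 of the 4-lane PHY:

#include <assert.h>

#define PMA_TX_ELEC_IDLE_SHIFT	4

int main(void)
{
	unsigned int mlane = 2, num_lanes = 2, dp_lanes = 2;
	unsigned int lane_mask = ((1U << dp_lanes) - 1) << mlane;
	unsigned int idle_mask = (((1U << num_lanes) - 1) << mlane)
				 << PMA_TX_ELEC_IDLE_SHIFT;

	assert(lane_mask == 0xC);	/* reset bits for lanes 2-3 */
	assert(idle_mask == 0xC0);	/* elec-idle bits for lanes 2-3 */
	return 0;
}
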
@@ -1377,41 +1440,44 @@ static int cdns_torrent_dp_set_lanes(struct cdns_torrent_phy *cdns_phy,
ndelay(100);
/* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
- cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, 0x0001);
+ value = cdns_torrent_dp_read(regmap, PHY_PMA_XCVR_PLLCLK_EN);
+ value |= (1 << clane);
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, value);
- ret = cdns_torrent_dp_run(cdns_phy, dp->lanes);
+ ret = cdns_torrent_dp_run(cdns_phy, inst, dp->lanes);
return ret;
}
/* Configure link rate as required. */
static int cdns_torrent_dp_set_rate(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
- u32 ret;
+ int ret;
- ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes,
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, dp->lanes,
POWERSTATE_A3);
if (ret)
return ret;
- ret = cdns_torrent_dp_set_pll_en(cdns_phy, dp, false);
+ ret = cdns_torrent_dp_set_pll_en(cdns_phy, inst, dp, false);
if (ret)
return ret;
ndelay(200);
- ret = cdns_torrent_dp_configure_rate(cdns_phy, dp);
+ ret = cdns_torrent_dp_configure_rate(cdns_phy, inst, dp);
if (ret)
return ret;
ndelay(200);
- ret = cdns_torrent_dp_set_pll_en(cdns_phy, dp, true);
+ ret = cdns_torrent_dp_set_pll_en(cdns_phy, inst, dp, true);
if (ret)
return ret;
- ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes,
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, dp->lanes,
POWERSTATE_A2);
if (ret)
return ret;
- ret = cdns_torrent_dp_set_power_state(cdns_phy, dp->lanes,
+ ret = cdns_torrent_dp_set_power_state(cdns_phy, inst, dp->lanes,
POWERSTATE_A0);
if (ret)
return ret;
@@ -1422,44 +1488,45 @@ static int cdns_torrent_dp_set_rate(struct cdns_torrent_phy *cdns_phy,
/* Configure voltage swing and pre-emphasis for all enabled lanes. */
static void cdns_torrent_dp_set_voltages(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
struct phy_configure_opts_dp *dp)
{
u8 lane;
u16 val;
for (lane = 0; lane < dp->lanes; lane++) {
- val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[lane],
+ val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_DIAG_ACYA);
/*
* Write 1 to register bit TX_DIAG_ACYA[0] to freeze the
* current state of the analog TX driver.
*/
val |= TX_DIAG_ACYA_HBDC_MASK;
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_DIAG_ACYA, val);
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_TXCC_CTRL, 0x08A4);
val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].diag_tx_drv;
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
DRV_DIAG_TX_DRV, val);
val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].mgnfs_mult;
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_TXCC_MGNFS_MULT_000,
val);
val = vltg_coeff[dp->voltage[lane]][dp->pre[lane]].cpost_mult;
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_TXCC_CPOST_MULT_00,
val);
- val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[lane],
+ val = cdns_torrent_phy_read(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_DIAG_ACYA);
/*
* Write 0 to register bit TX_DIAG_ACYA[0] to allow the state of
* analog TX driver to reflect the new programmed one.
*/
val &= ~TX_DIAG_ACYA_HBDC_MASK;
- cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[lane],
+ cdns_torrent_phy_write(cdns_phy->regmap_tx_lane_cdb[inst->mlane + lane],
TX_DIAG_ACYA, val);
}
};
@@ -1478,7 +1545,7 @@ static int cdns_torrent_dp_configure(struct phy *phy,
}
if (opts->dp.set_lanes) {
- ret = cdns_torrent_dp_set_lanes(cdns_phy, &opts->dp);
+ ret = cdns_torrent_dp_set_lanes(cdns_phy, inst, &opts->dp);
if (ret) {
dev_err(&phy->dev, "cdns_torrent_dp_set_lanes failed\n");
return ret;
@@ -1486,7 +1553,7 @@ static int cdns_torrent_dp_configure(struct phy *phy,
}
if (opts->dp.set_rate) {
- ret = cdns_torrent_dp_set_rate(cdns_phy, &opts->dp);
+ ret = cdns_torrent_dp_set_rate(cdns_phy, inst, &opts->dp);
if (ret) {
dev_err(&phy->dev, "cdns_torrent_dp_set_rate failed\n");
return ret;
@@ -1494,7 +1561,7 @@ static int cdns_torrent_dp_configure(struct phy *phy,
}
if (opts->dp.set_voltages)
- cdns_torrent_dp_set_voltages(cdns_phy, &opts->dp);
+ cdns_torrent_dp_set_voltages(cdns_phy, inst, &opts->dp);
return ret;
}
@@ -1562,6 +1629,7 @@ static void cdns_torrent_dp_common_init(struct cdns_torrent_phy *cdns_phy,
{
struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg;
unsigned char lane_bits;
+ u32 val;
cdns_torrent_dp_write(regmap, PHY_AUX_CTRL, 0x0003); /* enable AUX */
@@ -1569,18 +1637,23 @@ static void cdns_torrent_dp_common_init(struct cdns_torrent_phy *cdns_phy,
* Set lines power state to A0
* Set lines pll clk enable to 0
*/
- cdns_torrent_dp_set_a0_pll(cdns_phy, inst->num_lanes);
+ cdns_torrent_dp_set_a0_pll(cdns_phy, inst, inst->num_lanes);
/*
* release phy_l0*_reset_n and pma_tx_elec_idle_ln_* based on
* used lanes
*/
lane_bits = (1 << inst->num_lanes) - 1;
- cdns_torrent_dp_write(regmap, PHY_RESET,
- ((0xF & ~lane_bits) << 4) | (0xF & lane_bits));
+
+ val = cdns_torrent_dp_read(regmap, PHY_RESET);
+ val |= (0xF & lane_bits);
+ val &= ~(lane_bits << 4);
+ cdns_torrent_dp_write(regmap, PHY_RESET, val);
/* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
- cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, 0x0001);
+ val = cdns_torrent_dp_read(regmap, PHY_PMA_XCVR_PLLCLK_EN);
+ val |= 1;
+ cdns_torrent_dp_write(regmap, PHY_PMA_XCVR_PLLCLK_EN, val);
/*
* PHY PMA registers configuration functions
@@ -1599,7 +1672,7 @@ static void cdns_torrent_dp_common_init(struct cdns_torrent_phy *cdns_phy,
cdns_phy->max_bit_rate,
false);
- cdns_torrent_dp_pma_cmn_rate(cdns_phy, cdns_phy->max_bit_rate,
+ cdns_torrent_dp_pma_cmn_rate(cdns_phy, inst, cdns_phy->max_bit_rate,
inst->num_lanes);
/* take out of reset */
@@ -1612,13 +1685,15 @@ static int cdns_torrent_dp_start(struct cdns_torrent_phy *cdns_phy,
{
int ret;
- cdns_torrent_phy_on(phy);
+ ret = cdns_torrent_phy_on(phy);
+ if (ret)
+ return ret;
ret = cdns_torrent_dp_wait_pma_cmn_ready(cdns_phy);
if (ret)
return ret;
- ret = cdns_torrent_dp_run(cdns_phy, inst->num_lanes);
+ ret = cdns_torrent_dp_run(cdns_phy, inst, inst->num_lanes);
return ret;
}
@@ -1627,6 +1702,7 @@ static int cdns_torrent_dp_init(struct phy *phy)
{
struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent);
+ int ret;
switch (cdns_phy->ref_clk_rate) {
case CLK_19_2_MHZ:
@@ -1639,6 +1715,24 @@ static int cdns_torrent_dp_init(struct phy *phy)
return -EINVAL;
}
+ ret = cdns_torrent_dp_get_pll(cdns_phy, TYPE_NONE);
+ if (ret)
+ return ret;
+
+ cdns_torrent_dp_common_init(cdns_phy, inst);
+
+ return cdns_torrent_dp_start(cdns_phy, inst, phy);
+}
+
+static int cdns_torrent_dp_multilink_init(struct cdns_torrent_phy *cdns_phy,
+ struct cdns_torrent_inst *inst,
+ struct phy *phy)
+{
+ if (cdns_phy->ref_clk_rate != CLK_100_MHZ) {
+ dev_err(cdns_phy->dev, "Unsupported Ref Clock Rate\n");
+ return -EINVAL;
+ }
+
cdns_torrent_dp_common_init(cdns_phy, inst);
return cdns_torrent_dp_start(cdns_phy, inst, phy);
@@ -2155,8 +2249,11 @@ static int cdns_torrent_phy_init(struct phy *phy)
u32 num_regs;
int i, j;
- if (cdns_phy->nsubnodes > 1)
+ if (cdns_phy->nsubnodes > 1) {
+ if (phy_type == TYPE_DP)
+ return cdns_torrent_dp_multilink_init(cdns_phy, inst, phy);
return 0;
+ }
/**
* Spread spectrum generation is not required or supported
@@ -2398,6 +2495,12 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
}
}
+ if (phy_t1 == TYPE_DP) {
+ ret = cdns_torrent_dp_get_pll(cdns_phy, phy_t2);
+ if (ret)
+ return ret;
+ }
+
reset_control_deassert(cdns_phy->phys[node].lnk_rst);
}
@@ -2793,6 +2896,109 @@ static void cdns_torrent_phy_remove(struct platform_device *pdev)
cdns_torrent_clk_cleanup(cdns_phy);
}
+/* USB and DP link configuration */
+static struct cdns_reg_pairs usb_dp_link_cmn_regs[] = {
+ {0x0002, PHY_PLL_CFG},
+ {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs usb_dp_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0041, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_reg_pairs dp_usb_xcvr_diag_ln_regs[] = {
+ {0x0001, XCVR_DIAG_HSCLK_SEL},
+ {0x0009, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals usb_dp_link_cmn_vals = {
+ .reg_pairs = usb_dp_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(usb_dp_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals usb_dp_xcvr_diag_ln_vals = {
+ .reg_pairs = usb_dp_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(usb_dp_xcvr_diag_ln_regs),
+};
+
+static struct cdns_torrent_vals dp_usb_xcvr_diag_ln_vals = {
+ .reg_pairs = dp_usb_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(dp_usb_xcvr_diag_ln_regs),
+};
+
+/* PCIe and DP link configuration */
+static struct cdns_reg_pairs pcie_dp_link_cmn_regs[] = {
+ {0x0003, PHY_PLL_CFG},
+ {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0},
+ {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1}
+};
+
+static struct cdns_reg_pairs pcie_dp_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0012, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_reg_pairs dp_pcie_xcvr_diag_ln_regs[] = {
+ {0x0001, XCVR_DIAG_HSCLK_SEL},
+ {0x0009, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals pcie_dp_link_cmn_vals = {
+ .reg_pairs = pcie_dp_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_dp_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals pcie_dp_xcvr_diag_ln_vals = {
+ .reg_pairs = pcie_dp_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(pcie_dp_xcvr_diag_ln_regs),
+};
+
+static struct cdns_torrent_vals dp_pcie_xcvr_diag_ln_vals = {
+ .reg_pairs = dp_pcie_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(dp_pcie_xcvr_diag_ln_regs),
+};
+
+/* DP Multilink, 100 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs dp_100_no_ssc_cmn_regs[] = {
+ {0x007F, CMN_TXPUCAL_TUNE},
+ {0x007F, CMN_TXPDCAL_TUNE}
+};
+
+static struct cdns_reg_pairs dp_100_no_ssc_tx_ln_regs[] = {
+ {0x00FB, TX_PSC_A0},
+ {0x04AA, TX_PSC_A2},
+ {0x04AA, TX_PSC_A3},
+ {0x000F, XCVR_DIAG_BIDI_CTRL}
+};
+
+static struct cdns_reg_pairs dp_100_no_ssc_rx_ln_regs[] = {
+ {0x0000, RX_PSC_A0},
+ {0x0000, RX_PSC_A2},
+ {0x0000, RX_PSC_A3},
+ {0x0000, RX_PSC_CAL},
+ {0x0000, RX_REE_GCSM1_CTRL},
+ {0x0000, RX_REE_GCSM2_CTRL},
+ {0x0000, RX_REE_PERGCSM_CTRL}
+};
+
+static struct cdns_torrent_vals dp_100_no_ssc_cmn_vals = {
+ .reg_pairs = dp_100_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(dp_100_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals dp_100_no_ssc_tx_ln_vals = {
+ .reg_pairs = dp_100_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(dp_100_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals dp_100_no_ssc_rx_ln_vals = {
+ .reg_pairs = dp_100_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(dp_100_no_ssc_rx_ln_regs),
+};
+
/* Single DisplayPort(DP) link configuration */
static struct cdns_reg_pairs sl_dp_link_cmn_regs[] = {
{0x0000, PHY_PLL_CFG},
@@ -3735,6 +3941,12 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_link_cmn_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &pcie_dp_link_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &usb_dp_link_cmn_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -3757,6 +3969,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
[INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &pcie_dp_link_cmn_vals,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -3809,6 +4024,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
[INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_dp_link_cmn_vals,
+ },
},
},
.xcvr_diag_vals = {
@@ -3816,6 +4034,12 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_xcvr_diag_ln_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &dp_pcie_xcvr_diag_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &dp_usb_xcvr_diag_ln_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -3838,6 +4062,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
[INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &pcie_dp_xcvr_diag_ln_vals,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -3890,6 +4117,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
[INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_dp_xcvr_diag_ln_vals,
+ },
},
},
.pcs_cmn_vals = {
@@ -3914,6 +4144,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
[INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ },
},
},
.cmn_vals = {
@@ -3936,6 +4169,12 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &dp_100_no_ssc_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -3958,6 +4197,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
[INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = NULL,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -4010,6 +4252,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
[INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_100_no_ssc_cmn_vals,
+ },
},
},
},
@@ -4033,6 +4278,12 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_100_no_ssc_tx_ln_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &dp_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &dp_100_no_ssc_tx_ln_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -4055,6 +4306,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = NULL,
[INTERNAL_SSC] = NULL,
},
+ [TYPE_DP] = {
+ [NO_SSC] = NULL,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -4107,6 +4361,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
[INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
},
},
},
@@ -4130,6 +4387,12 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_100_no_ssc_rx_ln_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &dp_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &dp_100_no_ssc_rx_ln_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -4152,6 +4415,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
[INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -4204,6 +4470,9 @@ static const struct cdns_torrent_data cdns_map_torrent = {
[EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
[INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
},
},
},
@@ -4217,6 +4486,12 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_link_cmn_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &pcie_dp_link_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &usb_dp_link_cmn_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -4239,6 +4514,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
[INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &pcie_dp_link_cmn_vals,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -4291,6 +4569,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
[INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_dp_link_cmn_vals,
+ },
},
},
.xcvr_diag_vals = {
@@ -4298,6 +4579,12 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_xcvr_diag_ln_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &dp_pcie_xcvr_diag_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &dp_usb_xcvr_diag_ln_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -4320,6 +4607,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
[INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &pcie_dp_xcvr_diag_ln_vals,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -4372,6 +4662,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
[INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_dp_xcvr_diag_ln_vals,
+ },
},
},
.pcs_cmn_vals = {
@@ -4396,6 +4689,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
[INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_phy_pcs_cmn_vals,
+ },
},
},
.cmn_vals = {
@@ -4418,6 +4714,12 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &dp_100_no_ssc_cmn_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -4440,6 +4742,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
[INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = NULL,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -4492,6 +4797,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
[INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_100_no_ssc_cmn_vals,
+ },
},
},
},
@@ -4515,6 +4823,12 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_100_no_ssc_tx_ln_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &dp_100_no_ssc_tx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &dp_100_no_ssc_tx_ln_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -4537,6 +4851,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = NULL,
[INTERNAL_SSC] = NULL,
},
+ [TYPE_DP] = {
+ [NO_SSC] = NULL,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -4589,6 +4906,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
[INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
+ },
},
},
},
@@ -4612,6 +4932,12 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[TYPE_NONE] = {
[NO_SSC] = &sl_dp_100_no_ssc_rx_ln_vals,
},
+ [TYPE_PCIE] = {
+ [NO_SSC] = &dp_100_no_ssc_rx_ln_vals,
+ },
+ [TYPE_USB] = {
+ [NO_SSC] = &dp_100_no_ssc_rx_ln_vals,
+ },
},
[TYPE_PCIE] = {
[TYPE_NONE] = {
@@ -4634,6 +4960,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
[INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
+ },
},
[TYPE_SGMII] = {
[TYPE_NONE] = {
@@ -4686,6 +5015,9 @@ static const struct cdns_torrent_data ti_j721e_map_torrent = {
[EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
[INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
},
+ [TYPE_DP] = {
+ [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
+ },
},
},
},
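
The new DP entries above slot into the driver's nested
[phy_t1][phy_t2][ssc] lookup tables. A toy illustration of the indexing
scheme (enum values and table contents are abridged stand-ins, not the
driver's actual definitions; names mirror the arrays above):

#include <stdio.h>

enum phy_type { TYPE_NONE, TYPE_DP, TYPE_PCIE, TYPE_USB, TYPE_MAX };
enum ssc_mode { NO_SSC, EXTERNAL_SSC, INTERNAL_SSC, SSC_MAX };

static const char *link_cmn[TYPE_MAX][TYPE_MAX][SSC_MAX] = {
	[TYPE_DP] = {
		[TYPE_NONE] = { [NO_SSC] = "sl_dp_link_cmn_vals" },
		[TYPE_PCIE] = { [NO_SSC] = "pcie_dp_link_cmn_vals" },
		[TYPE_USB]  = { [NO_SSC] = "usb_dp_link_cmn_vals" },
	},
};

int main(void)
{
	/* DP paired with USB, no SSC */
	printf("%s\n", link_cmn[TYPE_DP][TYPE_USB][NO_SSC]);
	return 0;
}
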
diff --git a/drivers/phy/microchip/sparx5_serdes.c b/drivers/phy/microchip/sparx5_serdes.c
index ab1b0986aa67..01bd5ea620c5 100644
--- a/drivers/phy/microchip/sparx5_serdes.c
+++ b/drivers/phy/microchip/sparx5_serdes.c
@@ -25,12 +25,17 @@
#define SPX5_SERDES_10G_START 13
#define SPX5_SERDES_25G_START 25
+#define SPX5_SERDES_6G10G_CNT SPX5_SERDES_25G_START
+
+/* Optimal power settings from GUC */
+#define SPX5_SERDES_QUIET_MODE_VAL 0x01ef4e0c
enum sparx5_10g28cmu_mode {
SPX5_SD10G28_CMU_MAIN = 0,
SPX5_SD10G28_CMU_AUX1 = 1,
SPX5_SD10G28_CMU_AUX2 = 3,
SPX5_SD10G28_CMU_NONE = 4,
+ SPX5_SD10G28_CMU_MAX,
};
enum sparx5_sd25g28_mode_preset_type {
@@ -922,6 +927,222 @@ static void sparx5_sd10g28_get_params(struct sparx5_serdes_macro *macro,
*params = init;
}
+static int sparx5_cmu_apply_cfg(struct sparx5_serdes_private *priv,
+ u32 cmu_idx,
+ void __iomem *cmu_tgt,
+ void __iomem *cmu_cfg_tgt,
+ u32 spd10g)
+{
+ void __iomem **regs = priv->regs;
+ struct device *dev = priv->dev;
+ int value;
+
+ cmu_tgt = sdx5_inst_get(priv, TARGET_SD_CMU, cmu_idx);
+ cmu_cfg_tgt = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, cmu_idx);
+
+ if (cmu_idx == 1 || cmu_idx == 4 || cmu_idx == 7 ||
+ cmu_idx == 10 || cmu_idx == 13) {
+ spd10g = 0;
+ }
+
+ sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(1),
+ SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST,
+ cmu_cfg_tgt,
+ SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(0),
+ SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST,
+ cmu_cfg_tgt,
+ SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(1),
+ SD_CMU_CFG_SD_CMU_CFG_CMU_RST,
+ cmu_cfg_tgt,
+ SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT_SET(0x1) |
+ SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT_SET(0x1) |
+ SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT_SET(0x1) |
+ SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT_SET(0x1) |
+ SD_CMU_CMU_45_R_EN_RATECHG_CTRL_SET(0x0),
+ SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT |
+ SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT |
+ SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT |
+ SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT |
+ SD_CMU_CMU_45_R_EN_RATECHG_CTRL,
+ cmu_tgt,
+ SD_CMU_CMU_45(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0_SET(0),
+ SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0,
+ cmu_tgt,
+ SD_CMU_CMU_47(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_1B_CFG_RESERVE_7_0_SET(0),
+ SD_CMU_CMU_1B_CFG_RESERVE_7_0,
+ cmu_tgt,
+ SD_CMU_CMU_1B(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_JC_BYP_SET(0x1),
+ SD_CMU_CMU_0D_CFG_JC_BYP,
+ cmu_tgt,
+ SD_CMU_CMU_0D(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_1F_CFG_VTUNE_SEL_SET(1),
+ SD_CMU_CMU_1F_CFG_VTUNE_SEL,
+ cmu_tgt,
+ SD_CMU_CMU_1F(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0_SET(3),
+ SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0,
+ cmu_tgt,
+ SD_CMU_CMU_00(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0_SET(3),
+ SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0,
+ cmu_tgt,
+ SD_CMU_CMU_05(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_30_R_PLL_DLOL_EN_SET(1),
+ SD_CMU_CMU_30_R_PLL_DLOL_EN,
+ cmu_tgt,
+ SD_CMU_CMU_30(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_09_CFG_SW_10G_SET(spd10g),
+ SD_CMU_CMU_09_CFG_SW_10G,
+ cmu_tgt,
+ SD_CMU_CMU_09(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(0),
+ SD_CMU_CFG_SD_CMU_CFG_CMU_RST,
+ cmu_cfg_tgt,
+ SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
+
+ msleep(20);
+
+ sdx5_inst_rmw(SD_CMU_CMU_44_R_PLL_RSTN_SET(0),
+ SD_CMU_CMU_44_R_PLL_RSTN,
+ cmu_tgt,
+ SD_CMU_CMU_44(cmu_idx));
+
+ sdx5_inst_rmw(SD_CMU_CMU_44_R_PLL_RSTN_SET(1),
+ SD_CMU_CMU_44_R_PLL_RSTN,
+ cmu_tgt,
+ SD_CMU_CMU_44(cmu_idx));
+
+ msleep(20);
+
+ value = readl(sdx5_addr(regs, SD_CMU_CMU_E0(cmu_idx)));
+ value = SD_CMU_CMU_E0_PLL_LOL_UDL_GET(value);
+
+ if (value) {
+ dev_err(dev, "CMU PLL Loss of Lock: 0x%x\n", value);
+ return -EINVAL;
+ }
+ sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD_SET(0),
+ SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD,
+ cmu_tgt,
+ SD_CMU_CMU_0D(cmu_idx));
+ return 0;
+}
+
+static int sparx5_cmu_cfg(struct sparx5_serdes_private *priv, u32 cmu_idx)
+{
+ void __iomem *cmu_tgt, *cmu_cfg_tgt;
+ u32 spd10g = 1;
+
+ if (cmu_idx == 1 || cmu_idx == 4 || cmu_idx == 7 ||
+ cmu_idx == 10 || cmu_idx == 13) {
+ spd10g = 0;
+ }
+
+ cmu_tgt = sdx5_inst_get(priv, TARGET_SD_CMU, cmu_idx);
+ cmu_cfg_tgt = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, cmu_idx);
+
+ return sparx5_cmu_apply_cfg(priv, cmu_idx, cmu_tgt, cmu_cfg_tgt, spd10g);
+}
+
+/* Map of 6G/10G serdes mode and index to CMU index. */
+static const int
+sparx5_serdes_cmu_map[SPX5_SD10G28_CMU_MAX][SPX5_SERDES_6G10G_CNT] = {
+ [SPX5_SD10G28_CMU_MAIN] = { 2, 2, 2, 2, 2,
+ 2, 2, 2, 5, 5,
+ 5, 5, 5, 5, 5,
+ 5, 8, 11, 11, 11,
+ 11, 11, 11, 11, 11 },
+ [SPX5_SD10G28_CMU_AUX1] = { 0, 0, 3, 3, 3,
+ 3, 3, 3, 3, 3,
+ 6, 6, 6, 6, 6,
+ 6, 6, 9, 9, 12,
+ 12, 12, 12, 12, 12 },
+ [SPX5_SD10G28_CMU_AUX2] = { 1, 1, 1, 1, 4,
+ 4, 4, 4, 4, 4,
+ 4, 4, 7, 7, 7,
+ 7, 7, 10, 10, 10,
+ 10, 13, 13, 13, 13 },
+ [SPX5_SD10G28_CMU_NONE] = { 1, 1, 1, 1, 4,
+ 4, 4, 4, 4, 4,
+ 4, 4, 7, 7, 7,
+ 7, 7, 10, 10, 10,
+ 10, 13, 13, 13, 13 },
+};
+
+/* Get the index of the CMU which provides the clock for the specified serdes
+ * mode and index.
+ */
+static int sparx5_serdes_cmu_get(enum sparx5_10g28cmu_mode mode, int sd_index)
+{
+ return sparx5_serdes_cmu_map[mode][sd_index];
+}
+
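
A sketch of the lookup, abridged to the CMU_MAIN row copied from the map
above: serdes index 8 in main mode resolves to CMU 5.

#include <assert.h>

#define SPX5_SERDES_6G10G_CNT	25

enum cmu_mode { CMU_MAIN, CMU_MAX };

static const int cmu_map[CMU_MAX][SPX5_SERDES_6G10G_CNT] = {
	[CMU_MAIN] = { 2, 2, 2, 2, 2, 2, 2, 2, 5, 5, 5, 5, 5,
		       5, 5, 5, 8, 11, 11, 11, 11, 11, 11, 11, 11 },
};

int main(void)
{
	assert(cmu_map[CMU_MAIN][8] == 5);
	return 0;
}
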
+static void sparx5_serdes_cmu_power_off(struct sparx5_serdes_private *priv)
+{
+ void __iomem *cmu_inst, *cmu_cfg_inst;
+ int i;
+
+ /* Power down each CMU */
+ for (i = 0; i < SPX5_CMU_MAX; i++) {
+ cmu_inst = sdx5_inst_get(priv, TARGET_SD_CMU, i);
+ cmu_cfg_inst = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, i);
+
+ sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(0),
+ SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST, cmu_cfg_inst,
+ SD_CMU_CFG_SD_CMU_CFG(0));
+
+ sdx5_inst_rmw(SD_CMU_CMU_05_CFG_REFCK_TERM_EN_SET(0),
+ SD_CMU_CMU_05_CFG_REFCK_TERM_EN, cmu_inst,
+ SD_CMU_CMU_05(0));
+
+ sdx5_inst_rmw(SD_CMU_CMU_09_CFG_EN_TX_CK_DN_SET(0),
+ SD_CMU_CMU_09_CFG_EN_TX_CK_DN, cmu_inst,
+ SD_CMU_CMU_09(0));
+
+ sdx5_inst_rmw(SD_CMU_CMU_06_CFG_VCO_PD_SET(1),
+ SD_CMU_CMU_06_CFG_VCO_PD, cmu_inst,
+ SD_CMU_CMU_06(0));
+
+ sdx5_inst_rmw(SD_CMU_CMU_09_CFG_EN_TX_CK_UP_SET(0),
+ SD_CMU_CMU_09_CFG_EN_TX_CK_UP, cmu_inst,
+ SD_CMU_CMU_09(0));
+
+ sdx5_inst_rmw(SD_CMU_CMU_08_CFG_CK_TREE_PD_SET(1),
+ SD_CMU_CMU_08_CFG_CK_TREE_PD, cmu_inst,
+ SD_CMU_CMU_08(0));
+
+ sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_REFCK_PD_SET(1) |
+ SD_CMU_CMU_0D_CFG_PD_DIV64_SET(1) |
+ SD_CMU_CMU_0D_CFG_PD_DIV66_SET(1),
+ SD_CMU_CMU_0D_CFG_REFCK_PD |
+ SD_CMU_CMU_0D_CFG_PD_DIV64 |
+ SD_CMU_CMU_0D_CFG_PD_DIV66, cmu_inst,
+ SD_CMU_CMU_0D(0));
+
+ sdx5_inst_rmw(SD_CMU_CMU_06_CFG_CTRL_LOGIC_PD_SET(1),
+ SD_CMU_CMU_06_CFG_CTRL_LOGIC_PD, cmu_inst,
+ SD_CMU_CMU_06(0));
+ }
+}
+
static void sparx5_sd25g28_reset(void __iomem *regs[],
struct sparx5_sd25g28_params *params,
u32 sd_index)
@@ -1422,7 +1643,17 @@ static int sparx5_sd10g28_apply_params(struct sparx5_serdes_macro *macro,
u32 lane_index = macro->sidx;
u32 sd_index = macro->stpidx;
void __iomem *sd_inst;
- u32 value;
+ u32 value, cmu_idx;
+ int err;
+
+ /* Do not configure the serdes if the CMU is not to be configured too */
+ if (params->skip_cmu_cfg)
+ return 0;
+
+ cmu_idx = sparx5_serdes_cmu_get(params->cmu_sel, lane_index);
+ err = sparx5_cmu_cfg(priv, cmu_idx);
+ if (err)
+ return err;
if (params->is_6g)
sd_inst = sdx5_inst_get(priv, TARGET_SD6G_LANE, sd_index);
@@ -1884,6 +2115,7 @@ static int sparx5_sd10g28_config(struct sparx5_serdes_macro *macro, bool reset)
.rxinvert = 1,
.txswing = 240,
.reg_rst = reset,
+ .skip_cmu_cfg = reset,
};
int err;
@@ -1899,7 +2131,7 @@ static int sparx5_sd10g28_config(struct sparx5_serdes_macro *macro, bool reset)
static int sparx5_serdes_power_save(struct sparx5_serdes_macro *macro, u32 pwdn)
{
struct sparx5_serdes_private *priv = macro->priv;
- void __iomem *sd_inst;
+ void __iomem *sd_inst, *sd_lane_inst;
if (macro->serdestype == SPX5_SDT_6G)
sd_inst = sdx5_inst_get(priv, TARGET_SD6G_LANE, macro->stpidx);
@@ -1909,12 +2141,36 @@ static int sparx5_serdes_power_save(struct sparx5_serdes_macro *macro, u32 pwdn)
sd_inst = sdx5_inst_get(priv, TARGET_SD25G_LANE, macro->stpidx);
if (macro->serdestype == SPX5_SDT_25G) {
+ sd_lane_inst = sdx5_inst_get(priv, TARGET_SD_LANE_25G,
+ macro->stpidx);
+ /* Take serdes out of reset */
+ sdx5_inst_rmw(SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST_SET(0),
+ SD_LANE_25G_SD_LANE_CFG_EXT_CFG_RST, sd_lane_inst,
+ SD_LANE_25G_SD_LANE_CFG(0));
+
+ /* Configure optimal settings for quiet mode */
+ sdx5_inst_rmw(SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE_SET(SPX5_SERDES_QUIET_MODE_VAL),
+ SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE,
+ sd_lane_inst, SD_LANE_25G_QUIET_MODE_6G(0));
+
sdx5_inst_rmw(SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER_SET(pwdn),
SD25G_LANE_LANE_04_LN_CFG_PD_DRIVER,
sd_inst,
SD25G_LANE_LANE_04(0));
} else {
/* 6G and 10G */
+ sd_lane_inst = sdx5_inst_get(priv, TARGET_SD_LANE, macro->sidx);
+
+ /* Take serdes out of reset */
+ sdx5_inst_rmw(SD_LANE_SD_LANE_CFG_EXT_CFG_RST_SET(0),
+ SD_LANE_SD_LANE_CFG_EXT_CFG_RST, sd_lane_inst,
+ SD_LANE_SD_LANE_CFG(0));
+
+ /* Configure optimal settings for quiet mode */
+ sdx5_inst_rmw(SD_LANE_QUIET_MODE_6G_QUIET_MODE_SET(SPX5_SERDES_QUIET_MODE_VAL),
+ SD_LANE_QUIET_MODE_6G_QUIET_MODE, sd_lane_inst,
+ SD_LANE_QUIET_MODE_6G(0));
+
sdx5_inst_rmw(SD10G_LANE_LANE_06_CFG_PD_DRIVER_SET(pwdn),
SD10G_LANE_LANE_06_CFG_PD_DRIVER,
sd_inst,
@@ -1939,159 +2195,6 @@ static int sparx5_serdes_clock_config(struct sparx5_serdes_macro *macro)
return 0;
}
-static int sparx5_cmu_apply_cfg(struct sparx5_serdes_private *priv,
- u32 cmu_idx,
- void __iomem *cmu_tgt,
- void __iomem *cmu_cfg_tgt,
- u32 spd10g)
-{
- void __iomem **regs = priv->regs;
- struct device *dev = priv->dev;
- int value;
-
- cmu_tgt = sdx5_inst_get(priv, TARGET_SD_CMU, cmu_idx);
- cmu_cfg_tgt = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, cmu_idx);
-
- if (cmu_idx == 1 || cmu_idx == 4 || cmu_idx == 7 ||
- cmu_idx == 10 || cmu_idx == 13) {
- spd10g = 0;
- }
-
- sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(1),
- SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST,
- cmu_cfg_tgt,
- SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST_SET(0),
- SD_CMU_CFG_SD_CMU_CFG_EXT_CFG_RST,
- cmu_cfg_tgt,
- SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(1),
- SD_CMU_CFG_SD_CMU_CFG_CMU_RST,
- cmu_cfg_tgt,
- SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT_SET(0x1) |
- SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT_SET(0x1) |
- SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT_SET(0x1) |
- SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT_SET(0x1) |
- SD_CMU_CMU_45_R_EN_RATECHG_CTRL_SET(0x0),
- SD_CMU_CMU_45_R_DWIDTHCTRL_FROM_HWT |
- SD_CMU_CMU_45_R_REFCK_SSC_EN_FROM_HWT |
- SD_CMU_CMU_45_R_LINK_BUF_EN_FROM_HWT |
- SD_CMU_CMU_45_R_BIAS_EN_FROM_HWT |
- SD_CMU_CMU_45_R_EN_RATECHG_CTRL,
- cmu_tgt,
- SD_CMU_CMU_45(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0_SET(0),
- SD_CMU_CMU_47_R_PCS2PMA_PHYMODE_4_0,
- cmu_tgt,
- SD_CMU_CMU_47(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_1B_CFG_RESERVE_7_0_SET(0),
- SD_CMU_CMU_1B_CFG_RESERVE_7_0,
- cmu_tgt,
- SD_CMU_CMU_1B(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_JC_BYP_SET(0x1),
- SD_CMU_CMU_0D_CFG_JC_BYP,
- cmu_tgt,
- SD_CMU_CMU_0D(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_1F_CFG_VTUNE_SEL_SET(1),
- SD_CMU_CMU_1F_CFG_VTUNE_SEL,
- cmu_tgt,
- SD_CMU_CMU_1F(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0_SET(3),
- SD_CMU_CMU_00_CFG_PLL_TP_SEL_1_0,
- cmu_tgt,
- SD_CMU_CMU_00(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0_SET(3),
- SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0,
- cmu_tgt,
- SD_CMU_CMU_05(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_30_R_PLL_DLOL_EN_SET(1),
- SD_CMU_CMU_30_R_PLL_DLOL_EN,
- cmu_tgt,
- SD_CMU_CMU_30(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_09_CFG_SW_10G_SET(spd10g),
- SD_CMU_CMU_09_CFG_SW_10G,
- cmu_tgt,
- SD_CMU_CMU_09(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CFG_SD_CMU_CFG_CMU_RST_SET(0),
- SD_CMU_CFG_SD_CMU_CFG_CMU_RST,
- cmu_cfg_tgt,
- SD_CMU_CFG_SD_CMU_CFG(cmu_idx));
-
- msleep(20);
-
- sdx5_inst_rmw(SD_CMU_CMU_44_R_PLL_RSTN_SET(0),
- SD_CMU_CMU_44_R_PLL_RSTN,
- cmu_tgt,
- SD_CMU_CMU_44(cmu_idx));
-
- sdx5_inst_rmw(SD_CMU_CMU_44_R_PLL_RSTN_SET(1),
- SD_CMU_CMU_44_R_PLL_RSTN,
- cmu_tgt,
- SD_CMU_CMU_44(cmu_idx));
-
- msleep(20);
-
- value = readl(sdx5_addr(regs, SD_CMU_CMU_E0(cmu_idx)));
- value = SD_CMU_CMU_E0_PLL_LOL_UDL_GET(value);
-
- if (value) {
- dev_err(dev, "CMU PLL Loss of Lock: 0x%x\n", value);
- return -EINVAL;
- }
- sdx5_inst_rmw(SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD_SET(0),
- SD_CMU_CMU_0D_CFG_PMA_TX_CK_PD,
- cmu_tgt,
- SD_CMU_CMU_0D(cmu_idx));
- return 0;
-}
-
-static int sparx5_cmu_cfg(struct sparx5_serdes_private *priv, u32 cmu_idx)
-{
- void __iomem *cmu_tgt, *cmu_cfg_tgt;
- u32 spd10g = 1;
-
- if (cmu_idx == 1 || cmu_idx == 4 || cmu_idx == 7 ||
- cmu_idx == 10 || cmu_idx == 13) {
- spd10g = 0;
- }
-
- cmu_tgt = sdx5_inst_get(priv, TARGET_SD_CMU, cmu_idx);
- cmu_cfg_tgt = sdx5_inst_get(priv, TARGET_SD_CMU_CFG, cmu_idx);
-
- return sparx5_cmu_apply_cfg(priv, cmu_idx, cmu_tgt, cmu_cfg_tgt, spd10g);
-}
-
-static int sparx5_serdes_cmu_enable(struct sparx5_serdes_private *priv)
-{
- int idx, err = 0;
-
- if (!priv->cmu_enabled) {
- for (idx = 0; idx < SPX5_CMU_MAX; idx++) {
- err = sparx5_cmu_cfg(priv, idx);
- if (err) {
- dev_err(priv->dev, "CMU %u, error: %d\n", idx, err);
- goto leave;
- }
- }
- priv->cmu_enabled = true;
- }
-leave:
- return err;
-}
-
static int sparx5_serdes_get_serdesmode(phy_interface_t portmode, int speed)
{
switch (portmode) {
@@ -2120,10 +2223,6 @@ static int sparx5_serdes_config(struct sparx5_serdes_macro *macro)
int serdesmode;
int err;
- err = sparx5_serdes_cmu_enable(macro->priv);
- if (err)
- return err;
-
serdesmode = sparx5_serdes_get_serdesmode(macro->portmode, macro->speed);
if (serdesmode < 0) {
dev_err(dev, "SerDes %u, interface not supported: %s\n",
@@ -2215,9 +2314,6 @@ static int sparx5_serdes_reset(struct phy *phy)
struct sparx5_serdes_macro *macro = phy_get_drvdata(phy);
int err;
- err = sparx5_serdes_cmu_enable(macro->priv);
- if (err)
- return err;
if (macro->serdestype == SPX5_SDT_25G)
err = sparx5_sd25g28_config(macro, true);
else
@@ -2308,6 +2404,9 @@ static int sparx5_phy_create(struct sparx5_serdes_private *priv,
phy_set_drvdata(*phy, macro);
+ /* Power off serdes by default */
+ sparx5_serdes_power_off(*phy);
+
return 0;
}
@@ -2491,6 +2590,9 @@ static int sparx5_serdes_probe(struct platform_device *pdev)
return err;
}
+ /* Power down all CMUs by default */
+ sparx5_serdes_cmu_power_off(priv);
+
provider = devm_of_phy_provider_register(priv->dev, sparx5_serdes_xlate);
return PTR_ERR_OR_ZERO(provider);
diff --git a/drivers/phy/microchip/sparx5_serdes.h b/drivers/phy/microchip/sparx5_serdes.h
index 0a3e496e6210..13f94a29225a 100644
--- a/drivers/phy/microchip/sparx5_serdes.h
+++ b/drivers/phy/microchip/sparx5_serdes.h
@@ -30,7 +30,6 @@ struct sparx5_serdes_private {
struct device *dev;
void __iomem *regs[NUM_TARGETS];
struct phy *phys[SPX5_SERDES_MAX];
- bool cmu_enabled;
unsigned long coreclock;
};
diff --git a/drivers/phy/microchip/sparx5_serdes_regs.h b/drivers/phy/microchip/sparx5_serdes_regs.h
index b96386a4df5a..d0543fd3dc94 100644
--- a/drivers/phy/microchip/sparx5_serdes_regs.h
+++ b/drivers/phy/microchip/sparx5_serdes_regs.h
@@ -2149,6 +2149,92 @@ enum sparx5_serdes_target {
#define SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0_GET(x)\
FIELD_GET(SD_CMU_CMU_05_CFG_BIAS_TP_SEL_1_0, x)
+/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_06 */
+#define SD_CMU_CMU_06(t) \
+ __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 4, 0, 1, 4)
+
+#define SD_CMU_CMU_06_CFG_DISLOS BIT(0)
+#define SD_CMU_CMU_06_CFG_DISLOS_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_06_CFG_DISLOS, x)
+#define SD_CMU_CMU_06_CFG_DISLOS_GET(x)\
+ FIELD_GET(SD_CMU_CMU_06_CFG_DISLOS, x)
+
+#define SD_CMU_CMU_06_CFG_DISLOL BIT(1)
+#define SD_CMU_CMU_06_CFG_DISLOL_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_06_CFG_DISLOL, x)
+#define SD_CMU_CMU_06_CFG_DISLOL_GET(x)\
+ FIELD_GET(SD_CMU_CMU_06_CFG_DISLOL, x)
+
+#define SD_CMU_CMU_06_CFG_DCLOL BIT(2)
+#define SD_CMU_CMU_06_CFG_DCLOL_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_06_CFG_DCLOL, x)
+#define SD_CMU_CMU_06_CFG_DCLOL_GET(x)\
+ FIELD_GET(SD_CMU_CMU_06_CFG_DCLOL, x)
+
+#define SD_CMU_CMU_06_CFG_FORCE_RX_FILT BIT(3)
+#define SD_CMU_CMU_06_CFG_FORCE_RX_FILT_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_06_CFG_FORCE_RX_FILT, x)
+#define SD_CMU_CMU_06_CFG_FORCE_RX_FILT_GET(x)\
+ FIELD_GET(SD_CMU_CMU_06_CFG_FORCE_RX_FILT, x)
+
+#define SD_CMU_CMU_06_CFG_CTRL_LOGIC_PD BIT(4)
+#define SD_CMU_CMU_06_CFG_CTRL_LOGIC_PD_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_06_CFG_CTRL_LOGIC_PD, x)
+#define SD_CMU_CMU_06_CFG_CTRL_LOGIC_PD_GET(x)\
+ FIELD_GET(SD_CMU_CMU_06_CFG_CTRL_LOGIC_PD, x)
+
+#define SD_CMU_CMU_06_CFG_VCO_PD BIT(5)
+#define SD_CMU_CMU_06_CFG_VCO_PD_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_06_CFG_VCO_PD, x)
+#define SD_CMU_CMU_06_CFG_VCO_PD_GET(x)\
+ FIELD_GET(SD_CMU_CMU_06_CFG_VCO_PD, x)
+
+#define SD_CMU_CMU_06_CFG_VCO_CAL_RESETN BIT(6)
+#define SD_CMU_CMU_06_CFG_VCO_CAL_RESETN_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_06_CFG_VCO_CAL_RESETN, x)
+#define SD_CMU_CMU_06_CFG_VCO_CAL_RESETN_GET(x)\
+ FIELD_GET(SD_CMU_CMU_06_CFG_VCO_CAL_RESETN, x)
+
+#define SD_CMU_CMU_06_CFG_VCO_CAL_BYP BIT(7)
+#define SD_CMU_CMU_06_CFG_VCO_CAL_BYP_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_06_CFG_VCO_CAL_BYP, x)
+#define SD_CMU_CMU_06_CFG_VCO_CAL_BYP_GET(x)\
+ FIELD_GET(SD_CMU_CMU_06_CFG_VCO_CAL_BYP, x)
+
+/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_08 */
+#define SD_CMU_CMU_08(t) \
+ __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 12, 0, 1, 4)
+
+#define SD_CMU_CMU_08_CFG_VFILT2PAD BIT(0)
+#define SD_CMU_CMU_08_CFG_VFILT2PAD_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_08_CFG_VFILT2PAD, x)
+#define SD_CMU_CMU_08_CFG_VFILT2PAD_GET(x)\
+ FIELD_GET(SD_CMU_CMU_08_CFG_VFILT2PAD, x)
+
+#define SD_CMU_CMU_08_CFG_EN_DUMMY BIT(1)
+#define SD_CMU_CMU_08_CFG_EN_DUMMY_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_08_CFG_EN_DUMMY, x)
+#define SD_CMU_CMU_08_CFG_EN_DUMMY_GET(x)\
+ FIELD_GET(SD_CMU_CMU_08_CFG_EN_DUMMY, x)
+
+#define SD_CMU_CMU_08_CFG_CK_TREE_PD BIT(2)
+#define SD_CMU_CMU_08_CFG_CK_TREE_PD_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_08_CFG_CK_TREE_PD, x)
+#define SD_CMU_CMU_08_CFG_CK_TREE_PD_GET(x)\
+ FIELD_GET(SD_CMU_CMU_08_CFG_CK_TREE_PD, x)
+
+#define SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN BIT(3)
+#define SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN, x)
+#define SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_GET(x)\
+ FIELD_GET(SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN, x)
+
+#define SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_EN BIT(4)
+#define SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_EN_SET(x)\
+ FIELD_PREP(SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_EN, x)
+#define SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_EN_GET(x)\
+ FIELD_GET(SD_CMU_CMU_08_CFG_RST_TREE_PD_MAN_EN, x)
+
/* SD10G_CMU_TARGET:CMU_GRP_1:CMU_09 */
#define SD_CMU_CMU_09(t) __REG(TARGET_SD_CMU, t, 14, 20, 0, 1, 72, 16, 0, 1, 4)
@@ -2443,6 +2529,16 @@ enum sparx5_serdes_target {
#define SD_LANE_SD_LANE_STAT_DBG_OBS_GET(x)\
FIELD_GET(SD_LANE_SD_LANE_STAT_DBG_OBS, x)
+/* SD_LANE_TARGET:SD_PWR_CFG:QUIET_MODE_6G */
+#define SD_LANE_QUIET_MODE_6G(t) \
+ __REG(TARGET_SD_LANE, t, 25, 24, 0, 1, 8, 4, 0, 1, 4)
+
+#define SD_LANE_QUIET_MODE_6G_QUIET_MODE GENMASK(24, 0)
+#define SD_LANE_QUIET_MODE_6G_QUIET_MODE_SET(x)\
+ FIELD_PREP(SD_LANE_QUIET_MODE_6G_QUIET_MODE, x)
+#define SD_LANE_QUIET_MODE_6G_QUIET_MODE_GET(x)\
+ FIELD_GET(SD_LANE_QUIET_MODE_6G_QUIET_MODE, x)
+
/* SD_LANE_TARGET:CFG_STAT_FX100:MISC */
#define SD_LANE_MISC(t) __REG(TARGET_SD_LANE, t, 25, 56, 0, 1, 56, 0, 0, 1, 4)
@@ -2692,4 +2788,14 @@ enum sparx5_serdes_target {
#define SD_LANE_25G_SD_LANE_STAT_DBG_OBS_GET(x)\
FIELD_GET(SD_LANE_25G_SD_LANE_STAT_DBG_OBS, x)
+/* SD25G_CFG_TARGET:SD_PWR_CFG:QUIET_MODE_6G */
+#define SD_LANE_25G_QUIET_MODE_6G(t) \
+ __REG(TARGET_SD_LANE_25G, t, 8, 28, 0, 1, 8, 4, 0, 1, 4)
+
+#define SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE GENMASK(24, 0)
+#define SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE_SET(x)\
+ FIELD_PREP(SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE, x)
+#define SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE_GET(x)\
+ FIELD_GET(SD_LANE_25G_QUIET_MODE_6G_QUIET_MODE, x)
+
#endif /* _SPARX5_SERDES_REGS_H_ */
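
The *_SET/*_GET helpers added above are thin wrappers around FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>, so updating one field of a serdes register is a plain read-modify-write. A minimal sketch, assuming a directly mapped CMU_06 register; the real driver goes through its own register-accessor helpers, and cmu06_power_down_vco() is a hypothetical name:

#include <linux/bitfield.h>
#include <linux/io.h>

static void cmu06_power_down_vco(void __iomem *cmu06)
{
	u32 val = readl(cmu06);

	/* clear the single-bit field, then OR in the new value */
	val &= ~SD_CMU_CMU_06_CFG_VCO_PD;
	val |= SD_CMU_CMU_06_CFG_VCO_PD_SET(1);
	writel(val, cmu06);
}

The matching _GET() macro recovers the field from a raw register value, e.g. SD_CMU_CMU_06_CFG_VCO_PD_GET(readl(cmu06)).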
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 6850e04c329b..87b17e5877ab 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -2472,7 +2472,7 @@ static int qmp_combo_com_init(struct qmp_combo *qmp)
ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
if (ret) {
dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
- goto err_unlock;
+ goto err_decrement_count;
}
ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
@@ -2522,7 +2522,8 @@ err_assert_reset:
reset_control_bulk_assert(cfg->num_resets, qmp->resets);
err_disable_regulators:
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
-err_unlock:
+err_decrement_count:
+ qmp->init_count--;
mutex_unlock(&qmp->phy_mutex);
return ret;
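
This hunk (and the matching msm8996 fix below) corrects a refcount leak in the error path: init_count is incremented before the regulators are enabled, so a failure after that point must decrement it again, or a later init attempt would wrongly see the PHY as already initialized. A condensed sketch of the corrected unwind order, with hypothetical foo_* names standing in for the driver's types:

#include <linux/errno.h>
#include <linux/mutex.h>

struct foo_phy {
	struct mutex lock;
	int init_count;
};

static int foo_power_on(struct foo_phy *p)
{
	return -EIO;	/* stand-in for regulator/reset bring-up */
}

static int foo_com_init(struct foo_phy *p)
{
	int ret;

	mutex_lock(&p->lock);
	if (p->init_count++) {		/* already initialized */
		mutex_unlock(&p->lock);
		return 0;
	}

	ret = foo_power_on(p);
	if (ret)
		goto err_decrement_count;

	mutex_unlock(&p->lock);
	return 0;

err_decrement_count:
	p->init_count--;	/* undo the early increment on failure */
	mutex_unlock(&p->lock);
	return ret;
}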
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
index 09824be088c9..0c603bc06e09 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
@@ -379,7 +379,7 @@ static int qmp_pcie_msm8996_com_init(struct qmp_phy *qphy)
ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
if (ret) {
dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
- goto err_unlock;
+ goto err_decrement_count;
}
ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
@@ -409,7 +409,8 @@ err_assert_reset:
reset_control_bulk_assert(cfg->num_resets, qmp->resets);
err_disable_regulators:
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
-err_unlock:
+err_decrement_count:
+ qmp->init_count--;
mutex_unlock(&qmp->phy_mutex);
return ret;
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index a49711c5a63d..5c039bbbe036 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -1408,6 +1408,26 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21),
};
+static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG2, 0x89),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_POWER_STATE_CONFIG1, 0x6f),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21),
+};
+
struct qmp_usb_offsets {
u16 serdes;
u16 pcs;
@@ -1629,6 +1649,28 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
.has_phy_dp_com_ctrl = true,
};
+static const struct qmp_phy_cfg sa8775p_usb3_uniphy_cfg = {
+ .lanes = 1,
+
+ .offsets = &qmp_usb_offsets_v5,
+
+ .serdes_tbl = sc8280xp_usb3_uniphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_serdes_tbl),
+ .tx_tbl = sc8280xp_usb3_uniphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_tx_tbl),
+ .rx_tbl = sc8280xp_usb3_uniphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_rx_tbl),
+ .pcs_tbl = sa8775p_usb3_uniphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sa8775p_usb3_uniphy_pcs_tbl),
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = qcm2290_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(qcm2290_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v5_usb3phy_regs_layout,
+};
+
static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
.lanes = 2,
@@ -2598,6 +2640,9 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
.compatible = "qcom,qcm2290-qmp-usb3-phy",
.data = &qcm2290_usb3phy_cfg,
}, {
+ .compatible = "qcom,sa8775p-qmp-usb3-uni-phy",
+ .data = &sa8775p_usb3_uniphy_cfg,
+ }, {
.compatible = "qcom,sc7180-qmp-usb3-phy",
.data = &sc7180_usb3phy_cfg,
}, {
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 7435173e10f4..1489191a213f 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -376,10 +376,8 @@ static int bcm2835_add_pin_ranges_fallback(struct gpio_chip *gc)
if (!pctldev)
return 0;
- gpiochip_add_pin_range(gc, pinctrl_dev_get_devname(pctldev), 0, 0,
- gc->ngpio);
-
- return 0;
+ return gpiochip_add_pin_range(gc, pinctrl_dev_get_devname(pctldev), 0, 0,
+ gc->ngpio);
}
static const struct gpio_chip bcm2835_gpio_chip = {
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index f279b360c20d..7a4dd0c861ab 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -125,6 +125,12 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+
+ /* Use special handling for Pin0 debounce */
+ pin_reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
+ if (pin_reg & INTERNAL_GPIO0_DEBOUNCE)
+ debounce = 0;
+
pin_reg = readl(gpio_dev->base + offset * 4);
if (debounce) {
@@ -219,6 +225,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
char *debounce_enable;
char *wake_cntrlz;
+ seq_printf(s, "WAKE_INT_MASTER_REG: 0x%08x\n", readl(gpio_dev->base + WAKE_INT_MASTER_REG));
for (bank = 0; bank < gpio_dev->hwbank_num; bank++) {
unsigned int time = 0;
unsigned int unit = 0;
@@ -653,21 +660,21 @@ static bool do_amd_gpio_irq_handler(int irq, void *dev_id)
* We must read the pin register again, in case the
* value was changed while executing
* generic_handle_domain_irq() above.
- * If we didn't find a mapping for the interrupt,
- * disable it in order to avoid a system hang caused
- * by an interrupt storm.
+ * If the line is not an irq, disable it in order to
+ * avoid a system hang caused by an interrupt storm.
*/
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
regval = readl(regs + i);
- if (irq == 0) {
- regval &= ~BIT(INTERRUPT_ENABLE_OFF);
+ if (!gpiochip_line_is_irq(gc, irqnr + i)) {
+ regval &= ~BIT(INTERRUPT_MASK_OFF);
dev_dbg(&gpio_dev->pdev->dev,
"Disabling spurious GPIO IRQ %d\n",
irqnr + i);
+ } else {
+ ret = true;
}
writel(regval, regs + i);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
- ret = true;
}
}
/* did not cause wake on resume context for shared IRQ */
@@ -870,34 +877,6 @@ static const struct pinconf_ops amd_pinconf_ops = {
.pin_config_group_set = amd_pinconf_group_set,
};
-static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
-{
- struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
- unsigned long flags;
- u32 pin_reg, mask;
- int i;
-
- mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
- BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
- BIT(WAKE_CNTRL_OFF_S4);
-
- for (i = 0; i < desc->npins; i++) {
- int pin = desc->pins[i].number;
- const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
-
- if (!pd)
- continue;
-
- raw_spin_lock_irqsave(&gpio_dev->lock, flags);
-
- pin_reg = readl(gpio_dev->base + i * 4);
- pin_reg &= ~mask;
- writel(pin_reg, gpio_dev->base + i * 4);
-
- raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
- }
-}
-
#ifdef CONFIG_PM_SLEEP
static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
{
@@ -1135,9 +1114,6 @@ static int amd_gpio_probe(struct platform_device *pdev)
return PTR_ERR(gpio_dev->pctrl);
}
- /* Disable and mask interrupts */
- amd_gpio_irq_init(gpio_dev);
-
girq = &gpio_dev->gc.irq;
gpio_irq_chip_set_chip(girq, &amd_gpio_irqchip);
/* This will let us handle the parent IRQ in the driver */
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 81ae8319a1f0..1cf2d06bbd8c 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -17,6 +17,7 @@
#define AMD_GPIO_PINS_BANK3 32
#define WAKE_INT_MASTER_REG 0xfc
+#define INTERNAL_GPIO0_DEBOUNCE (1 << 15)
#define EOI_MASK (1 << 29)
#define WAKE_INT_STATUS_REG0 0x2f8
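
A minimal sketch of the Pin0 debounce special case the two pinctrl-amd hunks above introduce, assuming base is the mapped GPIO register block and effective_debounce() is a hypothetical helper: when the firmware flags internal GPIO0 debounce in WAKE_INT_MASTER_REG, the requested debounce is dropped so the driver does not fight the hardware's own handling.

#include <linux/io.h>

static unsigned int effective_debounce(void __iomem *base,
				       unsigned int debounce)
{
	u32 master = readl(base + WAKE_INT_MASTER_REG);

	return (master & INTERNAL_GPIO0_DEBOUNCE) ? 0 : debounce;
}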
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 43c7857c06a5..b4cd66886f29 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1229,6 +1229,8 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8941-gpio", .data = (void *) 36 },
/* pm8950 has 8 GPIOs with holes on 3 */
{ .compatible = "qcom,pm8950-gpio", .data = (void *) 8 },
+ /* pm8953 has 8 GPIOs with holes on 3 and 6 */
+ { .compatible = "qcom,pm8953-gpio", .data = (void *) 8 },
{ .compatible = "qcom,pm8994-gpio", .data = (void *) 22 },
{ .compatible = "qcom,pm8998-gpio", .data = (void *) 26 },
{ .compatible = "qcom,pma8084-gpio", .data = (void *) 22 },
diff --git a/drivers/power/reset/gpio-restart.c b/drivers/power/reset/gpio-restart.c
index 35d981d5e6c8..a479d3536eb1 100644
--- a/drivers/power/reset/gpio-restart.c
+++ b/drivers/power/reset/gpio-restart.c
@@ -105,7 +105,7 @@ static int gpio_restart_probe(struct platform_device *pdev)
return 0;
}
-static int gpio_restart_remove(struct platform_device *pdev)
+static void gpio_restart_remove(struct platform_device *pdev)
{
struct gpio_restart *gpio_restart = platform_get_drvdata(pdev);
int ret;
@@ -115,10 +115,7 @@ static int gpio_restart_remove(struct platform_device *pdev)
dev_err(&pdev->dev,
"%s: cannot unregister restart handler, %d\n",
__func__, ret);
- return -ENODEV;
}
-
- return 0;
}
static const struct of_device_id of_gpio_restart_match[] = {
@@ -128,7 +125,7 @@ static const struct of_device_id of_gpio_restart_match[] = {
static struct platform_driver gpio_restart_driver = {
.probe = gpio_restart_probe,
- .remove = gpio_restart_remove,
+ .remove_new = gpio_restart_remove,
.driver = {
.name = "restart-gpio",
.of_match_table = of_gpio_restart_match,
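
The .remove to .remove_new conversion above reflects that the driver core always ignored the int returned by platform remove callbacks; remove_new makes that explicit by returning void, and failures can only be logged. A hypothetical sketch of the converted shape:

#include <linux/platform_device.h>

static void foo_restart_remove(struct platform_device *pdev)
{
	/*
	 * Cleanup problems can only be logged here; the old int
	 * return was ignored by the core, which is why remove_new
	 * drops it.
	 */
	dev_warn(&pdev->dev, "cleanup issue logged, not returned\n");
}

static struct platform_driver foo_restart_driver = {
	.remove_new = foo_restart_remove,
	.driver = {
		.name = "foo-restart",
	},
};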
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index 307ee6f71042..6f83e99d2eb7 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -624,10 +624,8 @@ static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
*/
static void ab8500_btemp_external_power_changed(struct power_supply *psy)
{
- struct ab8500_btemp *di = power_supply_get_drvdata(psy);
-
- class_for_each_device(power_supply_class, NULL,
- di->btemp_psy, ab8500_btemp_get_ext_psy_data);
+ class_for_each_device(power_supply_class, NULL, psy,
+ ab8500_btemp_get_ext_psy_data);
}
/* ab8500 btemp driver interrupts and their respective isr */
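
This and several later hunks (ab8500_fg, axp288, bq25890, max17042, sc27xx, twl4030) make the same simplification: the power_supply passed to the external_power_changed() callback is the driver's own registered supply, so fetching it back through power_supply_get_drvdata() is a needless indirection. A minimal sketch with a hypothetical foo_ prefix:

#include <linux/power_supply.h>

static void foo_external_power_changed(struct power_supply *psy)
{
	/* psy is the same object this callback was registered on */
	power_supply_changed(psy);
}

Drivers whose callback only forwarded to power_supply_changed() can go one step further and set .external_power_changed = power_supply_changed directly, as the max17042, sc27xx and twl4030 hunks below do.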
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 41a7bff9ac37..53560fbb6dcd 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2407,10 +2407,8 @@ out:
*/
static void ab8500_fg_external_power_changed(struct power_supply *psy)
{
- struct ab8500_fg *di = power_supply_get_drvdata(psy);
-
- class_for_each_device(power_supply_class, NULL,
- di->fg_psy, ab8500_fg_get_ext_psy_data);
+ class_for_each_device(power_supply_class, NULL, psy,
+ ab8500_fg_get_ext_psy_data);
}
/**
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 05f413178462..3be6f3b10ea4 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -507,7 +507,7 @@ static void fuel_gauge_external_power_changed(struct power_supply *psy)
mutex_lock(&info->lock);
info->valid = 0; /* Force updating of the cached registers */
mutex_unlock(&info->lock);
- power_supply_changed(info->bat);
+ power_supply_changed(psy);
}
static struct power_supply_desc fuel_gauge_desc = {
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index de67b985f0a9..dc33f00fcc06 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -1262,6 +1262,7 @@ static void bq24190_input_current_limit_work(struct work_struct *work)
bq24190_charger_set_property(bdi->charger,
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
&val);
+ power_supply_changed(bdi->charger);
}
/* Sync the input-current-limit with our parent supply (if we have one) */
diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
index e624834ae66c..5ef93d0603c6 100644
--- a/drivers/power/supply/bq256xx_charger.c
+++ b/drivers/power/supply/bq256xx_charger.c
@@ -41,6 +41,9 @@
#define BQ256XX_IINDPM_MAX_uA 3200000
#define BQ256XX_IINDPM_DEF_uA 2400000
+#define BQ256XX_TS_IGNORE BIT(6)
+#define BQ256XX_TS_IGNORE_SHIFT 6
+
#define BQ256XX_VINDPM_MASK GENMASK(3, 0)
#define BQ256XX_VINDPM_STEP_uV 100000
#define BQ256XX_VINDPM_OFFSET_uV 3900000
@@ -156,6 +159,7 @@
* @vindpm: input voltage limit
* @ichg_max: maximum fast charge current
* @vbatreg_max: maximum charge voltage
+ * @ts_ignore: TS_IGNORE flag
*/
struct bq256xx_init_data {
u32 ichg;
@@ -166,6 +170,7 @@ struct bq256xx_init_data {
u32 vindpm;
u32 ichg_max;
u32 vbatreg_max;
+ bool ts_ignore;
};
/**
@@ -263,6 +268,7 @@ struct bq256xx_device {
* @bq256xx_set_iprechg: pointer to instance specific set_iprechg function
* @bq256xx_set_vindpm: pointer to instance specific set_vindpm function
* @bq256xx_set_charge_type: pointer to instance specific set_charge_type function
+ * @bq256xx_set_ts_ignore: pointer to instance specific set_ts_ignore function
*
* @bq256xx_def_ichg: default ichg value in microamps
* @bq256xx_def_iindpm: default iindpm value in microamps
@@ -295,6 +301,7 @@ struct bq256xx_chip_info {
int (*bq256xx_set_iprechg)(struct bq256xx_device *bq, int iprechg);
int (*bq256xx_set_vindpm)(struct bq256xx_device *bq, int vindpm);
int (*bq256xx_set_charge_type)(struct bq256xx_device *bq, int type);
+ int (*bq256xx_set_ts_ignore)(struct bq256xx_device *bq, bool ts_ignore);
int bq256xx_def_ichg;
int bq256xx_def_iindpm;
@@ -696,6 +703,12 @@ static int bq25601d_set_chrg_volt(struct bq256xx_device *bq, int vbatreg)
BQ256XX_VBATREG_BIT_SHIFT);
}
+static int bq256xx_set_ts_ignore(struct bq256xx_device *bq, bool ts_ignore)
+{
+ return regmap_update_bits(bq->regmap, BQ256XX_INPUT_CURRENT_LIMIT,
+ BQ256XX_TS_IGNORE, (ts_ignore ? 1 : 0) << BQ256XX_TS_IGNORE_SHIFT);
+}
+
static int bq256xx_get_prechrg_curr(struct bq256xx_device *bq)
{
unsigned int prechg_and_term_curr_lim;
@@ -1312,6 +1325,7 @@ static const struct bq256xx_chip_info bq256xx_chip_info_tbl[] = {
.bq256xx_get_iterm = bq256xx_get_term_curr,
.bq256xx_get_iprechg = bq256xx_get_prechrg_curr,
.bq256xx_get_vindpm = bq256xx_get_input_volt_lim,
+ .bq256xx_set_ts_ignore = NULL,
.bq256xx_set_ichg = bq256xx_set_ichg_curr,
.bq256xx_set_iindpm = bq256xx_set_input_curr_lim,
@@ -1351,6 +1365,7 @@ static const struct bq256xx_chip_info bq256xx_chip_info_tbl[] = {
.bq256xx_set_iprechg = bq256xx_set_prechrg_curr,
.bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
.bq256xx_set_charge_type = bq256xx_set_charge_type,
+ .bq256xx_set_ts_ignore = NULL,
.bq256xx_def_ichg = BQ2560X_ICHG_DEF_uA,
.bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
@@ -1382,6 +1397,7 @@ static const struct bq256xx_chip_info bq256xx_chip_info_tbl[] = {
.bq256xx_set_iprechg = bq256xx_set_prechrg_curr,
.bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
.bq256xx_set_charge_type = bq256xx_set_charge_type,
+ .bq256xx_set_ts_ignore = NULL,
.bq256xx_def_ichg = BQ2560X_ICHG_DEF_uA,
.bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
@@ -1413,6 +1429,7 @@ static const struct bq256xx_chip_info bq256xx_chip_info_tbl[] = {
.bq256xx_set_iprechg = bq256xx_set_prechrg_curr,
.bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
.bq256xx_set_charge_type = bq256xx_set_charge_type,
+ .bq256xx_set_ts_ignore = NULL,
.bq256xx_def_ichg = BQ2560X_ICHG_DEF_uA,
.bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
@@ -1444,6 +1461,7 @@ static const struct bq256xx_chip_info bq256xx_chip_info_tbl[] = {
.bq256xx_set_iprechg = bq256xx_set_prechrg_curr,
.bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
.bq256xx_set_charge_type = bq256xx_set_charge_type,
+ .bq256xx_set_ts_ignore = bq256xx_set_ts_ignore,
.bq256xx_def_ichg = BQ25611D_ICHG_DEF_uA,
.bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
@@ -1475,6 +1493,7 @@ static const struct bq256xx_chip_info bq256xx_chip_info_tbl[] = {
.bq256xx_set_iprechg = bq25618_619_set_prechrg_curr,
.bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
.bq256xx_set_charge_type = bq256xx_set_charge_type,
+ .bq256xx_set_ts_ignore = bq256xx_set_ts_ignore,
.bq256xx_def_ichg = BQ25618_ICHG_DEF_uA,
.bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
@@ -1506,6 +1525,7 @@ static const struct bq256xx_chip_info bq256xx_chip_info_tbl[] = {
.bq256xx_set_iprechg = bq25618_619_set_prechrg_curr,
.bq256xx_set_vindpm = bq256xx_set_input_volt_lim,
.bq256xx_set_charge_type = bq256xx_set_charge_type,
+ .bq256xx_set_ts_ignore = bq256xx_set_ts_ignore,
.bq256xx_def_ichg = BQ25618_ICHG_DEF_uA,
.bq256xx_def_iindpm = BQ256XX_IINDPM_DEF_uA,
@@ -1622,6 +1642,12 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
if (ret)
return ret;
+ if (bq->chip_info->bq256xx_set_ts_ignore) {
+ ret = bq->chip_info->bq256xx_set_ts_ignore(bq, bq->init_data.ts_ignore);
+ if (ret)
+ return ret;
+ }
+
power_supply_put_battery_info(bq->charger, bat_info);
return 0;
@@ -1656,6 +1682,8 @@ static int bq256xx_parse_dt(struct bq256xx_device *bq,
if (ret)
bq->init_data.iindpm = bq->chip_info->bq256xx_def_iindpm;
+ bq->init_data.ts_ignore = device_property_read_bool(bq->dev, "ti,no-thermistor");
+
return 0;
}
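
The bq256xx change wires the new "ti,no-thermistor" property through an optional per-chip callback: variants without a TS_IGNORE bit leave the hook NULL and hw_init simply skips it. A sketch of the underlying register write, where map and reg are hypothetical stand-ins for the driver's regmap and the input-current-limit register:

#include <linux/regmap.h>

static int foo_set_ts_ignore(struct regmap *map, unsigned int reg,
			     bool ts_ignore)
{
	/*
	 * regmap_update_bits() read-modify-writes only the masked
	 * bit, leaving the other fields of the register untouched;
	 * 'ts_ignore ? BQ256XX_TS_IGNORE : 0' is equivalent to the
	 * explicit shift used in the patch.
	 */
	return regmap_update_bits(map, reg, BQ256XX_TS_IGNORE,
				  ts_ignore ? BQ256XX_TS_IGNORE : 0);
}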
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 22cde35eb144..f8636cf86505 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -750,7 +750,7 @@ static void bq25890_charger_external_power_changed(struct power_supply *psy)
if (bq->chip_version != BQ25892)
return;
- ret = power_supply_get_property_from_supplier(bq->charger,
+ ret = power_supply_get_property_from_supplier(psy,
POWER_SUPPLY_PROP_USB_TYPE,
&val);
if (ret)
@@ -775,6 +775,7 @@ static void bq25890_charger_external_power_changed(struct power_supply *psy)
}
bq25890_field_write(bq, F_IINLIM, input_current_limit);
+ power_supply_changed(psy);
}
static int bq25890_get_chip_state(struct bq25890_device *bq,
@@ -1106,6 +1107,8 @@ static void bq25890_pump_express_work(struct work_struct *data)
dev_info(bq->dev, "Hi-voltage charging requested, input voltage is %d mV\n",
voltage);
+ power_supply_changed(bq->charger);
+
return;
error_print:
bq25890_field_write(bq, F_PUMPX_EN, 0);
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 5ff6f44fd47b..4296600e8912 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1083,10 +1083,8 @@ static int poll_interval_param_set(const char *val, const struct kernel_param *k
return ret;
mutex_lock(&bq27xxx_list_lock);
- list_for_each_entry(di, &bq27xxx_battery_devices, list) {
- cancel_delayed_work_sync(&di->work);
- schedule_delayed_work(&di->work, 0);
- }
+ list_for_each_entry(di, &bq27xxx_battery_devices, list)
+ mod_delayed_work(system_wq, &di->work, 0);
mutex_unlock(&bq27xxx_list_lock);
return ret;
@@ -1761,60 +1759,6 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
return POWER_SUPPLY_HEALTH_GOOD;
}
-void bq27xxx_battery_update(struct bq27xxx_device_info *di)
-{
- struct bq27xxx_reg_cache cache = {0, };
- bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
-
- cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
- if ((cache.flags & 0xff) == 0xff)
- cache.flags = -1; /* read error */
- if (cache.flags >= 0) {
- cache.temperature = bq27xxx_battery_read_temperature(di);
- if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
- cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
- if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
- cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
- if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
- cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
-
- cache.charge_full = bq27xxx_battery_read_fcc(di);
- cache.capacity = bq27xxx_battery_read_soc(di);
- if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
- cache.energy = bq27xxx_battery_read_energy(di);
- di->cache.flags = cache.flags;
- cache.health = bq27xxx_battery_read_health(di);
- if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
- cache.cycle_count = bq27xxx_battery_read_cyct(di);
-
- /* We only have to read charge design full once */
- if (di->charge_design_full <= 0)
- di->charge_design_full = bq27xxx_battery_read_dcap(di);
- }
-
- if ((di->cache.capacity != cache.capacity) ||
- (di->cache.flags != cache.flags))
- power_supply_changed(di->bat);
-
- if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
- di->cache = cache;
-
- di->last_update = jiffies;
-}
-EXPORT_SYMBOL_GPL(bq27xxx_battery_update);
-
-static void bq27xxx_battery_poll(struct work_struct *work)
-{
- struct bq27xxx_device_info *di =
- container_of(work, struct bq27xxx_device_info,
- work.work);
-
- bq27xxx_battery_update(di);
-
- if (poll_interval > 0)
- schedule_delayed_work(&di->work, poll_interval * HZ);
-}
-
static bool bq27xxx_battery_is_full(struct bq27xxx_device_info *di, int flags)
{
if (di->opts & BQ27XXX_O_ZERO)
@@ -1833,7 +1777,8 @@ static bool bq27xxx_battery_is_full(struct bq27xxx_device_info *di, int flags)
static int bq27xxx_battery_current_and_status(
struct bq27xxx_device_info *di,
union power_supply_propval *val_curr,
- union power_supply_propval *val_status)
+ union power_supply_propval *val_status,
+ struct bq27xxx_reg_cache *cache)
{
bool single_flags = (di->opts & BQ27XXX_O_ZERO);
int curr;
@@ -1845,10 +1790,14 @@ static int bq27xxx_battery_current_and_status(
return curr;
}
- flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, single_flags);
- if (flags < 0) {
- dev_err(di->dev, "error reading flags\n");
- return flags;
+ if (cache) {
+ flags = cache->flags;
+ } else {
+ flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, single_flags);
+ if (flags < 0) {
+ dev_err(di->dev, "error reading flags\n");
+ return flags;
+ }
}
if (di->opts & BQ27XXX_O_ZERO) {
@@ -1883,6 +1832,78 @@ static int bq27xxx_battery_current_and_status(
return 0;
}
+static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
+{
+ union power_supply_propval status = di->last_status;
+ struct bq27xxx_reg_cache cache = {0, };
+ bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
+
+ cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
+ if ((cache.flags & 0xff) == 0xff)
+ cache.flags = -1; /* read error */
+ if (cache.flags >= 0) {
+ cache.temperature = bq27xxx_battery_read_temperature(di);
+ if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
+ cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
+ if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
+ cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
+ if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
+ cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
+
+ cache.charge_full = bq27xxx_battery_read_fcc(di);
+ cache.capacity = bq27xxx_battery_read_soc(di);
+ if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
+ cache.energy = bq27xxx_battery_read_energy(di);
+ di->cache.flags = cache.flags;
+ cache.health = bq27xxx_battery_read_health(di);
+ if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
+ cache.cycle_count = bq27xxx_battery_read_cyct(di);
+
+ /*
+ * On gauges with signed current reporting the current must be
+ * checked to detect charging <-> discharging status changes.
+ */
+ if (!(di->opts & BQ27XXX_O_ZERO))
+ bq27xxx_battery_current_and_status(di, NULL, &status, &cache);
+
+ /* We only have to read charge design full once */
+ if (di->charge_design_full <= 0)
+ di->charge_design_full = bq27xxx_battery_read_dcap(di);
+ }
+
+ if ((di->cache.capacity != cache.capacity) ||
+ (di->cache.flags != cache.flags) ||
+ (di->last_status.intval != status.intval)) {
+ di->last_status.intval = status.intval;
+ power_supply_changed(di->bat);
+ }
+
+ if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
+ di->cache = cache;
+
+ di->last_update = jiffies;
+
+ if (!di->removed && poll_interval > 0)
+ mod_delayed_work(system_wq, &di->work, poll_interval * HZ);
+}
+
+void bq27xxx_battery_update(struct bq27xxx_device_info *di)
+{
+ mutex_lock(&di->lock);
+ bq27xxx_battery_update_unlocked(di);
+ mutex_unlock(&di->lock);
+}
+EXPORT_SYMBOL_GPL(bq27xxx_battery_update);
+
+static void bq27xxx_battery_poll(struct work_struct *work)
+{
+ struct bq27xxx_device_info *di =
+ container_of(work, struct bq27xxx_device_info,
+ work.work);
+
+ bq27xxx_battery_update(di);
+}
+
/*
* Get the average power in µW
* Return < 0 if something fails.
@@ -1985,10 +2006,8 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
mutex_lock(&di->lock);
- if (time_is_before_jiffies(di->last_update + 5 * HZ)) {
- cancel_delayed_work_sync(&di->work);
- bq27xxx_battery_poll(&di->work.work);
- }
+ if (time_is_before_jiffies(di->last_update + 5 * HZ))
+ bq27xxx_battery_update_unlocked(di);
mutex_unlock(&di->lock);
if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
@@ -1996,7 +2015,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- ret = bq27xxx_battery_current_and_status(di, NULL, val);
+ ret = bq27xxx_battery_current_and_status(di, NULL, val, NULL);
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
ret = bq27xxx_battery_voltage(di, val);
@@ -2005,7 +2024,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
val->intval = di->cache.flags < 0 ? 0 : 1;
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
- ret = bq27xxx_battery_current_and_status(di, val, NULL);
+ ret = bq27xxx_battery_current_and_status(di, val, NULL, NULL);
break;
case POWER_SUPPLY_PROP_CAPACITY:
ret = bq27xxx_simple_value(di->cache.capacity, val);
@@ -2078,8 +2097,8 @@ static void bq27xxx_external_power_changed(struct power_supply *psy)
{
struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
- cancel_delayed_work_sync(&di->work);
- schedule_delayed_work(&di->work, 0);
+ /* After charger plug in/out wait 0.5s for things to stabilize */
+ mod_delayed_work(system_wq, &di->work, HZ / 2);
}
int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
@@ -2127,22 +2146,18 @@ EXPORT_SYMBOL_GPL(bq27xxx_battery_setup);
void bq27xxx_battery_teardown(struct bq27xxx_device_info *di)
{
- /*
- * power_supply_unregister call bq27xxx_battery_get_property which
- * call bq27xxx_battery_poll.
- * Make sure that bq27xxx_battery_poll will not call
- * schedule_delayed_work again after unregister (which cause OOPS).
- */
- poll_interval = 0;
-
- cancel_delayed_work_sync(&di->work);
-
- power_supply_unregister(di->bat);
-
mutex_lock(&bq27xxx_list_lock);
list_del(&di->list);
mutex_unlock(&bq27xxx_list_lock);
+ /* Set removed to avoid bq27xxx_battery_update() re-queuing the work */
+ mutex_lock(&di->lock);
+ di->removed = true;
+ mutex_unlock(&di->lock);
+
+ cancel_delayed_work_sync(&di->work);
+
+ power_supply_unregister(di->bat);
mutex_destroy(&di->lock);
}
EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown);
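
The bq27xxx rework above replaces every cancel_delayed_work_sync() + schedule_delayed_work() pair with mod_delayed_work(), moves poll re-arming into the locked update path guarded by the new removed flag, and reorders teardown so the flag is set under the lock before the work is cancelled and the supply unregistered. A minimal sketch of the rescheduling idiom, with hypothetical poll_* names:

#include <linux/workqueue.h>

static void poll_fn(struct work_struct *work)
{
	/* read the gauge; re-arming is decided by the caller */
}

static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void poll_soon(void)
{
	/*
	 * mod_delayed_work() re-arms a pending item to the new expiry
	 * (or queues it if idle) in one step; unlike the old
	 * cancel-then-schedule pair it does not block waiting for the
	 * work function, so it is safe with locks the work also takes.
	 */
	mod_delayed_work(system_wq, &poll_work, HZ / 2);
}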
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index f8768997333b..6d3c74876339 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -179,7 +179,7 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client)
i2c_set_clientdata(client, di);
if (client->irq) {
- ret = devm_request_threaded_irq(&client->dev, client->irq,
+ ret = request_threaded_irq(client->irq,
NULL, bq27xxx_battery_irq_handler_thread,
IRQF_ONESHOT,
di->name, di);
@@ -209,6 +209,7 @@ static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
{
struct bq27xxx_device_info *di = i2c_get_clientdata(client);
+ free_irq(client->irq, di);
bq27xxx_battery_teardown(di);
mutex_lock(&battery_mutex);
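
The two i2c hunks above trade devm_request_threaded_irq() for a manual request_threaded_irq() so that remove() can free the irq before tearing the device down; with devm, the irq would only be released after remove() returns, leaving a window where the threaded handler could run against a dismantled device. Sketch with hypothetical foo_* names:

#include <linux/i2c.h>
#include <linux/interrupt.h>

struct foo_chip { int id; };

static void foo_teardown(struct foo_chip *chip)
{
	/* stand-in for bq27xxx_battery_teardown()-style cleanup */
}

static void foo_i2c_remove(struct i2c_client *client)
{
	struct foo_chip *chip = i2c_get_clientdata(client);

	free_irq(client->irq, chip);	/* stop the handler first... */
	foo_teardown(chip);		/* ...then dismantle the device */
}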
diff --git a/drivers/power/supply/cros_peripheral_charger.c b/drivers/power/supply/cros_peripheral_charger.c
index 1379afd9698d..a204f2355be4 100644
--- a/drivers/power/supply/cros_peripheral_charger.c
+++ b/drivers/power/supply/cros_peripheral_charger.c
@@ -227,8 +227,7 @@ static int cros_pchg_get_prop(struct power_supply *psy,
return 0;
}
-static int cros_pchg_event(const struct charger_data *charger,
- unsigned long host_event)
+static int cros_pchg_event(const struct charger_data *charger)
{
int i;
@@ -256,7 +255,7 @@ static int cros_ec_notify(struct notifier_block *nb,
if (!(host_event & EC_MKBP_PCHG_DEVICE_EVENT))
return NOTIFY_DONE;
- return cros_pchg_event(charger, host_event);
+ return cros_pchg_event(charger);
}
static int cros_pchg_probe(struct platform_device *pdev)
@@ -281,6 +280,8 @@ static int cros_pchg_probe(struct platform_device *pdev)
charger->ec_dev = ec_dev;
charger->ec_device = ec_device;
+ platform_set_drvdata(pdev, charger);
+
ret = cros_pchg_port_count(charger);
if (ret <= 0) {
/*
@@ -349,9 +350,27 @@ static int cros_pchg_probe(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int __maybe_unused cros_pchg_resume(struct device *dev)
+{
+ struct charger_data *charger = dev_get_drvdata(dev);
+
+ /*
+ * Sync all ports on resume in case reports from EC are lost during
+ * the last suspend.
+ */
+ cros_pchg_event(charger);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cros_pchg_pm_ops, NULL, cros_pchg_resume);
+
static struct platform_driver cros_pchg_driver = {
.driver = {
.name = DRV_NAME,
+ .pm = &cros_pchg_pm_ops,
},
.probe = cros_pchg_probe
};
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 89cabe8ed3b0..6a2d00d95a39 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -499,11 +499,6 @@ static int max17042_property_is_writeable(struct power_supply *psy,
return ret;
}
-static void max17042_external_power_changed(struct power_supply *psy)
-{
- power_supply_changed(psy);
-}
-
static int max17042_write_verify_reg(struct regmap *map, u8 reg, u32 value)
{
int retries = 8;
@@ -1016,7 +1011,7 @@ static const struct power_supply_desc max17042_psy_desc = {
.get_property = max17042_get_property,
.set_property = max17042_set_property,
.property_is_writeable = max17042_property_is_writeable,
- .external_power_changed = max17042_external_power_changed,
+ .external_power_changed = power_supply_changed,
.properties = max17042_battery_props,
.num_properties = ARRAY_SIZE(max17042_battery_props),
};
diff --git a/drivers/power/supply/power_supply_hwmon.c b/drivers/power/supply/power_supply_hwmon.c
index a48aa4afb828..c97893d4c25e 100644
--- a/drivers/power/supply/power_supply_hwmon.c
+++ b/drivers/power/supply/power_supply_hwmon.c
@@ -293,7 +293,7 @@ static const struct hwmon_ops power_supply_hwmon_ops = {
.read_string = power_supply_hwmon_read_string,
};
-static const struct hwmon_channel_info *power_supply_hwmon_info[] = {
+static const struct hwmon_channel_info * const power_supply_hwmon_info[] = {
HWMON_CHANNEL_INFO(temp,
HWMON_T_LABEL |
HWMON_T_INPUT |
diff --git a/drivers/power/supply/power_supply_leds.c b/drivers/power/supply/power_supply_leds.c
index 702bf83f6e6d..0674483279d7 100644
--- a/drivers/power/supply/power_supply_leds.c
+++ b/drivers/power/supply/power_supply_leds.c
@@ -35,8 +35,9 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
led_trigger_event(psy->charging_full_trig, LED_FULL);
led_trigger_event(psy->charging_trig, LED_OFF);
led_trigger_event(psy->full_trig, LED_FULL);
- led_trigger_event(psy->charging_blink_full_solid_trig,
- LED_FULL);
+ /* Going from blink to LED on requires a LED_OFF event to stop blink */
+ led_trigger_event(psy->charging_blink_full_solid_trig, LED_OFF);
+ led_trigger_event(psy->charging_blink_full_solid_trig, LED_FULL);
break;
case POWER_SUPPLY_STATUS_CHARGING:
led_trigger_event(psy->charging_full_trig, LED_FULL);
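
As the new comment in the hunk above says, a trigger that was left blinking only latches a solid brightness after an explicit LED_OFF, hence the deliberate OFF/FULL pair. A minimal sketch of that idiom, with a hypothetical helper name:

#include <linux/leds.h>

static void foo_set_solid_full(struct led_trigger *trig)
{
	led_trigger_event(trig, LED_OFF);	/* stop software blink */
	led_trigger_event(trig, LED_FULL);	/* solid on now latches */
}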
diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
index 1a2143641e66..8328bcea1a29 100644
--- a/drivers/power/supply/rk817_charger.c
+++ b/drivers/power/supply/rk817_charger.c
@@ -1134,7 +1134,7 @@ static int rk817_charger_probe(struct platform_device *pdev)
&bat_info);
if (ret) {
return dev_err_probe(dev, ret,
- "Unable to get battery info: %d\n", ret);
+ "Unable to get battery info\n");
}
if ((bat_info->charge_full_design_uah <= 0) ||
diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c
index 632977f84b95..bd23c4d9fed4 100644
--- a/drivers/power/supply/sc27xx_fuel_gauge.c
+++ b/drivers/power/supply/sc27xx_fuel_gauge.c
@@ -733,13 +733,6 @@ static int sc27xx_fgu_set_property(struct power_supply *psy,
return ret;
}
-static void sc27xx_fgu_external_power_changed(struct power_supply *psy)
-{
- struct sc27xx_fgu_data *data = power_supply_get_drvdata(psy);
-
- power_supply_changed(data->battery);
-}
-
static int sc27xx_fgu_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
@@ -774,7 +767,7 @@ static const struct power_supply_desc sc27xx_fgu_desc = {
.num_properties = ARRAY_SIZE(sc27xx_fgu_props),
.get_property = sc27xx_fgu_get_property,
.set_property = sc27xx_fgu_set_property,
- .external_power_changed = sc27xx_fgu_external_power_changed,
+ .external_power_changed = power_supply_changed,
.property_is_writeable = sc27xx_fgu_property_is_writeable,
.no_thermal = true,
};
diff --git a/drivers/power/supply/twl4030_madc_battery.c b/drivers/power/supply/twl4030_madc_battery.c
index 48649dcfe3a9..7fe029673b22 100644
--- a/drivers/power/supply/twl4030_madc_battery.c
+++ b/drivers/power/supply/twl4030_madc_battery.c
@@ -168,19 +168,13 @@ static int twl4030_madc_bat_get_property(struct power_supply *psy,
return 0;
}
-static void twl4030_madc_bat_ext_changed(struct power_supply *psy)
-{
- power_supply_changed(psy);
-}
-
static const struct power_supply_desc twl4030_madc_bat_desc = {
.name = "twl4030_battery",
.type = POWER_SUPPLY_TYPE_BATTERY,
.properties = twl4030_madc_bat_props,
.num_properties = ARRAY_SIZE(twl4030_madc_bat_props),
.get_property = twl4030_madc_bat_get_property,
- .external_power_changed = twl4030_madc_bat_ext_changed,
-
+ .external_power_changed = power_supply_changed,
};
static int twl4030_cmp(const void *a, const void *b)
diff --git a/drivers/regulator/88pg86x.c b/drivers/regulator/88pg86x.c
index 74275b681f46..e6598e74ec94 100644
--- a/drivers/regulator/88pg86x.c
+++ b/drivers/regulator/88pg86x.c
@@ -104,7 +104,7 @@ static struct i2c_driver pg86x_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(pg86x_dt_ids),
},
- .probe_new = pg86x_i2c_probe,
+ .probe = pg86x_i2c_probe,
.id_table = pg86x_i2c_id,
};
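
The remainder of this section is one mechanical conversion repeated across the regulator drivers: with the I2C core's probe() callback now taking only the client (the old two-argument form with an i2c_device_id is gone), .probe_new is renamed back to .probe. A hypothetical sketch of the resulting driver shape:

#include <linux/i2c.h>

static int foo_i2c_probe(struct i2c_client *client)
{
	return 0;
}

static struct i2c_driver foo_i2c_driver = {
	.driver = {
		.name = "foo",
	},
	.probe = foo_i2c_probe,	/* was .probe_new before the rename */
};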
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index 5c409ff4aa99..a504b01dd99c 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -791,7 +791,7 @@ static struct i2c_driver act8865_pmic_driver = {
.name = "act8865",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe_new = act8865_pmic_probe,
+ .probe = act8865_pmic_probe,
.id_table = act8865_ids,
};
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index c228cf6956d1..40f7dba42b5a 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -254,7 +254,7 @@ static int ad5398_probe(struct i2c_client *client)
}
static struct i2c_driver ad5398_driver = {
- .probe_new = ad5398_probe,
+ .probe = ad5398_probe,
.driver = {
.name = "ad5398",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index 6ce0fdc18b9c..122124944749 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -1197,7 +1197,7 @@ static struct i2c_driver da9121_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(da9121_dt_ids),
},
- .probe_new = da9121_i2c_probe,
+ .probe = da9121_i2c_probe,
.remove = da9121_i2c_remove,
.id_table = da9121_i2c_id,
};
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index 4332a3b8a672..252f74ab9bc0 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -224,7 +224,7 @@ static struct i2c_driver da9210_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(da9210_dt_ids),
},
- .probe_new = da9210_i2c_probe,
+ .probe = da9210_i2c_probe,
.id_table = da9210_i2c_id,
};
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index a2b4f6f1e34b..af383ff0fe57 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -555,7 +555,7 @@ static struct i2c_driver da9211_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(da9211_dt_ids),
},
- .probe_new = da9211_i2c_probe,
+ .probe = da9211_i2c_probe,
.id_table = da9211_i2c_id,
};
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 130f3dbe9840..289c06e09f47 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -775,7 +775,7 @@ static struct i2c_driver fan53555_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(fan53555_dt_ids),
},
- .probe_new = fan53555_regulator_probe,
+ .probe = fan53555_regulator_probe,
.id_table = fan53555_id,
};
diff --git a/drivers/regulator/fan53880.c b/drivers/regulator/fan53880.c
index a3bebdee570e..6cb5656845f9 100644
--- a/drivers/regulator/fan53880.c
+++ b/drivers/regulator/fan53880.c
@@ -175,7 +175,7 @@ static struct i2c_driver fan53880_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = fan53880_dt_ids,
},
- .probe_new = fan53880_i2c_probe,
+ .probe = fan53880_i2c_probe,
.id_table = fan53880_i2c_id,
};
module_i2c_driver(fan53880_regulator_driver);
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index 3c37c4de1d82..69b4afe95e66 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -149,7 +149,7 @@ static struct i2c_driver isl6271a_i2c_driver = {
.name = "isl6271a",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe_new = isl6271a_probe,
+ .probe = isl6271a_probe,
.id_table = isl6271a_id,
};
diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c
index 90bc8d054304..0f7560093091 100644
--- a/drivers/regulator/isl9305.c
+++ b/drivers/regulator/isl9305.c
@@ -198,7 +198,7 @@ static struct i2c_driver isl9305_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(isl9305_dt_ids),
},
- .probe_new = isl9305_i2c_probe,
+ .probe = isl9305_i2c_probe,
.id_table = isl9305_i2c_id,
};
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index e06f2a092b89..e1b5c45f97f4 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -449,7 +449,7 @@ static struct i2c_driver lp3971_i2c_driver = {
.name = "LP3971",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe_new = lp3971_i2c_probe,
+ .probe = lp3971_i2c_probe,
.id_table = lp3971_i2c_id,
};
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index edacca8e14af..7bd6f05edd8d 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -547,7 +547,7 @@ static struct i2c_driver lp3972_i2c_driver = {
.name = "lp3972",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe_new = lp3972_i2c_probe,
+ .probe = lp3972_i2c_probe,
.id_table = lp3972_i2c_id,
};
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index a8b0969d4f31..63aa227b1813 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -947,7 +947,7 @@ static struct i2c_driver lp872x_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(lp872x_dt_ids),
},
- .probe_new = lp872x_probe,
+ .probe = lp872x_probe,
.id_table = lp872x_ids,
};
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
index 37b51b94fb5a..4bc310f972ed 100644
--- a/drivers/regulator/lp8755.c
+++ b/drivers/regulator/lp8755.c
@@ -442,7 +442,7 @@ static struct i2c_driver lp8755_i2c_driver = {
.name = LP8755_NAME,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe_new = lp8755_probe,
+ .probe = lp8755_probe,
.remove = lp8755_remove,
.id_table = lp8755_id,
};
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index 359b534d8c70..149ac281c1f9 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -477,7 +477,7 @@ static struct i2c_driver ltc3589_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(ltc3589_of_match),
},
- .probe_new = ltc3589_probe,
+ .probe = ltc3589_probe,
.id_table = ltc3589_i2c_id,
};
module_i2c_driver(ltc3589_driver);
diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c
index a28e6c3460f1..2a225c722564 100644
--- a/drivers/regulator/ltc3676.c
+++ b/drivers/regulator/ltc3676.c
@@ -374,7 +374,7 @@ static struct i2c_driver ltc3676_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(ltc3676_of_match),
},
- .probe_new = ltc3676_regulator_probe,
+ .probe = ltc3676_regulator_probe,
.id_table = ltc3676_i2c_id,
};
module_i2c_driver(ltc3676_driver);
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 5d8852b2c168..90aa5b723c03 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -289,7 +289,7 @@ static const struct i2c_device_id max1586_id[] = {
MODULE_DEVICE_TABLE(i2c, max1586_id);
static struct i2c_driver max1586_pmic_driver = {
- .probe_new = max1586_pmic_probe,
+ .probe = max1586_pmic_probe,
.driver = {
.name = "max1586",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c
index ace1d582a191..fad31f5f435e 100644
--- a/drivers/regulator/max20086-regulator.c
+++ b/drivers/regulator/max20086-regulator.c
@@ -323,7 +323,7 @@ static struct i2c_driver max20086_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(max20086_dt_ids),
},
- .probe_new = max20086_i2c_probe,
+ .probe = max20086_i2c_probe,
.id_table = max20086_i2c_id,
};
diff --git a/drivers/regulator/max20411-regulator.c b/drivers/regulator/max20411-regulator.c
index be8169b86a89..8c09dc71b16d 100644
--- a/drivers/regulator/max20411-regulator.c
+++ b/drivers/regulator/max20411-regulator.c
@@ -156,7 +156,7 @@ static struct i2c_driver max20411_i2c_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_max20411_match_tbl,
},
- .probe_new = max20411_probe,
+ .probe = max20411_probe,
.id_table = max20411_id,
};
module_i2c_driver(max20411_i2c_driver);
diff --git a/drivers/regulator/max77826-regulator.c b/drivers/regulator/max77826-regulator.c
index ea5d4b18b464..3855f5e686d8 100644
--- a/drivers/regulator/max77826-regulator.c
+++ b/drivers/regulator/max77826-regulator.c
@@ -292,7 +292,7 @@ static struct i2c_driver max77826_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(max77826_of_match),
},
- .probe_new = max77826_i2c_probe,
+ .probe = max77826_i2c_probe,
.id_table = max77826_id,
};
module_i2c_driver(max77826_regulator_driver);
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index a517fb4e3669..24e1dfba78c8 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -246,7 +246,7 @@ static const struct i2c_device_id max8649_id[] = {
MODULE_DEVICE_TABLE(i2c, max8649_id);
static struct i2c_driver max8649_driver = {
- .probe_new = max8649_regulator_probe,
+ .probe = max8649_regulator_probe,
.driver = {
.name = "max8649",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index d6b89f07ae9e..ede17099b727 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -503,7 +503,7 @@ static const struct i2c_device_id max8660_id[] = {
MODULE_DEVICE_TABLE(i2c, max8660_id);
static struct i2c_driver max8660_driver = {
- .probe_new = max8660_probe,
+ .probe = max8660_probe,
.driver = {
.name = "max8660",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/regulator/max8893.c b/drivers/regulator/max8893.c
index 10ffd77828b7..cb0e72948dd4 100644
--- a/drivers/regulator/max8893.c
+++ b/drivers/regulator/max8893.c
@@ -168,7 +168,7 @@ static const struct i2c_device_id max8893_ids[] = {
MODULE_DEVICE_TABLE(i2c, max8893_ids);
static struct i2c_driver max8893_driver = {
- .probe_new = max8893_probe_new,
+ .probe = max8893_probe_new,
.driver = {
.name = "max8893",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 8ad8fe7fd263..0b0b841d214a 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -313,7 +313,7 @@ static const struct i2c_device_id max8952_ids[] = {
MODULE_DEVICE_TABLE(i2c, max8952_ids);
static struct i2c_driver max8952_pmic_driver = {
- .probe_new = max8952_pmic_probe,
+ .probe = max8952_pmic_probe,
.driver = {
.name = "max8952",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index a991a884a31b..8d5193207552 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -807,7 +807,7 @@ static struct i2c_driver max8973_i2c_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_max8973_match_tbl,
},
- .probe_new = max8973_probe,
+ .probe = max8973_probe,
.id_table = max8973_id,
};
diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c
index 3a6d79556942..6c6f5a21362b 100644
--- a/drivers/regulator/mcp16502.c
+++ b/drivers/regulator/mcp16502.c
@@ -584,7 +584,7 @@ static const struct i2c_device_id mcp16502_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, mcp16502_i2c_id);
static struct i2c_driver mcp16502_drv = {
- .probe_new = mcp16502_probe,
+ .probe = mcp16502_probe,
.driver = {
.name = "mcp16502-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
diff --git a/drivers/regulator/mp5416.c b/drivers/regulator/mp5416.c
index 91e9019430b8..3886b252fbe7 100644
--- a/drivers/regulator/mp5416.c
+++ b/drivers/regulator/mp5416.c
@@ -240,7 +240,7 @@ static struct i2c_driver mp5416_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(mp5416_of_match),
},
- .probe_new = mp5416_i2c_probe,
+ .probe = mp5416_i2c_probe,
.id_table = mp5416_id,
};
module_i2c_driver(mp5416_regulator_driver);
diff --git a/drivers/regulator/mp8859.c b/drivers/regulator/mp8859.c
index b968a682f38a..b820bd6043e5 100644
--- a/drivers/regulator/mp8859.c
+++ b/drivers/regulator/mp8859.c
@@ -147,7 +147,7 @@ static struct i2c_driver mp8859_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(mp8859_dt_id),
},
- .probe_new = mp8859_i2c_probe,
+ .probe = mp8859_i2c_probe,
.id_table = mp8859_i2c_id,
};
diff --git a/drivers/regulator/mp886x.c b/drivers/regulator/mp886x.c
index 250c27e462f1..ede1b1e58002 100644
--- a/drivers/regulator/mp886x.c
+++ b/drivers/regulator/mp886x.c
@@ -365,7 +365,7 @@ static struct i2c_driver mp886x_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = mp886x_dt_ids,
},
- .probe_new = mp886x_i2c_probe,
+ .probe = mp886x_i2c_probe,
.id_table = mp886x_id,
};
module_i2c_driver(mp886x_regulator_driver);
diff --git a/drivers/regulator/mpq7920.c b/drivers/regulator/mpq7920.c
index 544d41b88514..bf677c535edc 100644
--- a/drivers/regulator/mpq7920.c
+++ b/drivers/regulator/mpq7920.c
@@ -321,7 +321,7 @@ static struct i2c_driver mpq7920_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(mpq7920_of_match),
},
- .probe_new = mpq7920_i2c_probe,
+ .probe = mpq7920_i2c_probe,
.id_table = mpq7920_id,
};
module_i2c_driver(mpq7920_regulator_driver);
diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c
index a9f0c9f725d4..b0771770cc26 100644
--- a/drivers/regulator/mt6311-regulator.c
+++ b/drivers/regulator/mt6311-regulator.c
@@ -154,7 +154,7 @@ static struct i2c_driver mt6311_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(mt6311_dt_ids),
},
- .probe_new = mt6311_i2c_probe,
+ .probe = mt6311_i2c_probe,
.id_table = mt6311_i2c_id,
};
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index 87a746dcb516..9f2e33a5a69e 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -875,7 +875,7 @@ static struct i2c_driver pca9450_i2c_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = pca9450_of_match,
},
- .probe_new = pca9450_i2c_probe,
+ .probe = pca9450_i2c_probe,
};
module_i2c_driver(pca9450_i2c_driver);
diff --git a/drivers/regulator/pf8x00-regulator.c b/drivers/regulator/pf8x00-regulator.c
index 99a15c3be396..b0781d9a1058 100644
--- a/drivers/regulator/pf8x00-regulator.c
+++ b/drivers/regulator/pf8x00-regulator.c
@@ -610,7 +610,7 @@ static struct i2c_driver pf8x00_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = pf8x00_dt_ids,
},
- .probe_new = pf8x00_i2c_probe,
+ .probe = pf8x00_i2c_probe,
};
module_i2c_driver(pf8x00_regulator_driver);
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index a9fcf6a41494..8d7e6c323324 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -848,7 +848,7 @@ static struct i2c_driver pfuze_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = pfuze_dt_ids,
},
- .probe_new = pfuze100_regulator_probe,
+ .probe = pfuze100_regulator_probe,
};
module_i2c_driver(pfuze_driver);
diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
index f170e0dd1819..aa90360fa046 100644
--- a/drivers/regulator/pv88060-regulator.c
+++ b/drivers/regulator/pv88060-regulator.c
@@ -379,7 +379,7 @@ static struct i2c_driver pv88060_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(pv88060_dt_ids),
},
- .probe_new = pv88060_i2c_probe,
+ .probe = pv88060_i2c_probe,
.id_table = pv88060_i2c_id,
};
diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c
index 133b89d5215c..7ab3e4a9bd28 100644
--- a/drivers/regulator/pv88080-regulator.c
+++ b/drivers/regulator/pv88080-regulator.c
@@ -560,7 +560,7 @@ static struct i2c_driver pv88080_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(pv88080_dt_ids),
},
- .probe_new = pv88080_i2c_probe,
+ .probe = pv88080_i2c_probe,
.id_table = pv88080_i2c_id,
};
diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
index 1bc33bc10992..f4acde4d56c8 100644
--- a/drivers/regulator/pv88090-regulator.c
+++ b/drivers/regulator/pv88090-regulator.c
@@ -400,7 +400,7 @@ static struct i2c_driver pv88090_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(pv88090_dt_ids),
},
- .probe_new = pv88090_i2c_probe,
+ .probe = pv88090_i2c_probe,
.id_table = pv88090_i2c_id,
};
diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c
index 9afe961a87f1..e9719a378a0b 100644
--- a/drivers/regulator/rpi-panel-attiny-regulator.c
+++ b/drivers/regulator/rpi-panel-attiny-regulator.c
@@ -399,7 +399,7 @@ static struct i2c_driver attiny_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(attiny_dt_ids),
},
- .probe_new = attiny_i2c_probe,
+ .probe = attiny_i2c_probe,
.remove = attiny_i2c_remove,
};
diff --git a/drivers/regulator/rt4801-regulator.c b/drivers/regulator/rt4801-regulator.c
index be3dc981195c..4955bfea4e84 100644
--- a/drivers/regulator/rt4801-regulator.c
+++ b/drivers/regulator/rt4801-regulator.c
@@ -242,7 +242,7 @@ static struct i2c_driver rt4801_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(rt4801_of_id),
},
- .probe_new = rt4801_probe,
+ .probe = rt4801_probe,
};
module_i2c_driver(rt4801_driver);
diff --git a/drivers/regulator/rt5190a-regulator.c b/drivers/regulator/rt5190a-regulator.c
index f6c12f87fb8d..a53ed523b5d7 100644
--- a/drivers/regulator/rt5190a-regulator.c
+++ b/drivers/regulator/rt5190a-regulator.c
@@ -508,7 +508,7 @@ static struct i2c_driver rt5190a_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = rt5190a_device_table,
},
- .probe_new = rt5190a_probe,
+ .probe = rt5190a_probe,
};
module_i2c_driver(rt5190a_driver);
diff --git a/drivers/regulator/rt5759-regulator.c b/drivers/regulator/rt5759-regulator.c
index d5a42ad21a9a..90555a9ef1b0 100644
--- a/drivers/regulator/rt5759-regulator.c
+++ b/drivers/regulator/rt5759-regulator.c
@@ -362,7 +362,7 @@ static struct i2c_driver rt5759_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(rt5759_device_table),
},
- .probe_new = rt5759_probe,
+ .probe = rt5759_probe,
};
module_i2c_driver(rt5759_driver);
diff --git a/drivers/regulator/rt6160-regulator.c b/drivers/regulator/rt6160-regulator.c
index 8990dac23460..e2a0eee95c61 100644
--- a/drivers/regulator/rt6160-regulator.c
+++ b/drivers/regulator/rt6160-regulator.c
@@ -311,7 +311,7 @@ static struct i2c_driver rt6160_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = rt6160_of_match_table,
},
- .probe_new = rt6160_probe,
+ .probe = rt6160_probe,
};
module_i2c_driver(rt6160_driver);
diff --git a/drivers/regulator/rt6190-regulator.c b/drivers/regulator/rt6190-regulator.c
index ca91a1f6d3c8..3883440295ed 100644
--- a/drivers/regulator/rt6190-regulator.c
+++ b/drivers/regulator/rt6190-regulator.c
@@ -487,7 +487,7 @@ static struct i2c_driver rt6190_driver = {
.of_match_table = rt6190_of_dev_table,
.pm = pm_ptr(&rt6190_dev_pm),
},
- .probe_new = rt6190_probe,
+ .probe = rt6190_probe,
};
module_i2c_driver(rt6190_driver);
diff --git a/drivers/regulator/rt6245-regulator.c b/drivers/regulator/rt6245-regulator.c
index 8721d11c7964..1843ecec1922 100644
--- a/drivers/regulator/rt6245-regulator.c
+++ b/drivers/regulator/rt6245-regulator.c
@@ -246,7 +246,7 @@ static struct i2c_driver rt6245_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = rt6245_of_match_table,
},
- .probe_new = rt6245_probe,
+ .probe = rt6245_probe,
};
module_i2c_driver(rt6245_driver);
diff --git a/drivers/regulator/rtmv20-regulator.c b/drivers/regulator/rtmv20-regulator.c
index 7cbb812477e1..dfd1522637e4 100644
--- a/drivers/regulator/rtmv20-regulator.c
+++ b/drivers/regulator/rtmv20-regulator.c
@@ -429,7 +429,7 @@ static struct i2c_driver rtmv20_driver = {
.of_match_table = of_match_ptr(rtmv20_of_id),
.pm = &rtmv20_pm,
},
- .probe_new = rtmv20_probe,
+ .probe = rtmv20_probe,
};
module_i2c_driver(rtmv20_driver);
diff --git a/drivers/regulator/rtq2134-regulator.c b/drivers/regulator/rtq2134-regulator.c
index ee1577dc3cfc..b7372cb2b97d 100644
--- a/drivers/regulator/rtq2134-regulator.c
+++ b/drivers/regulator/rtq2134-regulator.c
@@ -366,7 +366,7 @@ static struct i2c_driver rtq2134_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = rtq2134_device_tables,
},
- .probe_new = rtq2134_probe,
+ .probe = rtq2134_probe,
};
module_i2c_driver(rtq2134_driver);
diff --git a/drivers/regulator/rtq6752-regulator.c b/drivers/regulator/rtq6752-regulator.c
index 8559a266a7eb..8176e5ab0683 100644
--- a/drivers/regulator/rtq6752-regulator.c
+++ b/drivers/regulator/rtq6752-regulator.c
@@ -281,7 +281,7 @@ static struct i2c_driver rtq6752_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = rtq6752_device_table,
},
- .probe_new = rtq6752_probe,
+ .probe = rtq6752_probe,
};
module_i2c_driver(rtq6752_driver);
diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c
index 559ae031010f..59aa16825d8a 100644
--- a/drivers/regulator/slg51000-regulator.c
+++ b/drivers/regulator/slg51000-regulator.c
@@ -507,7 +507,7 @@ static struct i2c_driver slg51000_regulator_driver = {
.name = "slg51000-regulator",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe_new = slg51000_i2c_probe,
+ .probe = slg51000_i2c_probe,
.id_table = slg51000_i2c_id,
};
diff --git a/drivers/regulator/sy8106a-regulator.c b/drivers/regulator/sy8106a-regulator.c
index e3c753986309..1bcfdd6dcfc1 100644
--- a/drivers/regulator/sy8106a-regulator.c
+++ b/drivers/regulator/sy8106a-regulator.c
@@ -141,7 +141,7 @@ static struct i2c_driver sy8106a_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sy8106a_i2c_of_match,
},
- .probe_new = sy8106a_i2c_probe,
+ .probe = sy8106a_i2c_probe,
.id_table = sy8106a_i2c_id,
};
diff --git a/drivers/regulator/sy8824x.c b/drivers/regulator/sy8824x.c
index c327ad69f676..d0703105c439 100644
--- a/drivers/regulator/sy8824x.c
+++ b/drivers/regulator/sy8824x.c
@@ -236,7 +236,7 @@ static struct i2c_driver sy8824_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sy8824_dt_ids,
},
- .probe_new = sy8824_i2c_probe,
+ .probe = sy8824_i2c_probe,
.id_table = sy8824_id,
};
module_i2c_driver(sy8824_regulator_driver);
diff --git a/drivers/regulator/sy8827n.c b/drivers/regulator/sy8827n.c
index 99ca08cc3a6a..433959b43549 100644
--- a/drivers/regulator/sy8827n.c
+++ b/drivers/regulator/sy8827n.c
@@ -190,7 +190,7 @@ static struct i2c_driver sy8827n_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = sy8827n_dt_ids,
},
- .probe_new = sy8827n_i2c_probe,
+ .probe = sy8827n_i2c_probe,
.id_table = sy8827n_id,
};
module_i2c_driver(sy8827n_regulator_driver);
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index 9bd4e72914ed..d8a856c1587a 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -354,7 +354,7 @@ static struct i2c_driver tps51632_i2c_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(tps51632_of_match),
},
- .probe_new = tps51632_probe,
+ .probe = tps51632_probe,
.id_table = tps51632_id,
};
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index 65cc08d1a67d..32e1a05a57fd 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -491,7 +491,7 @@ static struct i2c_driver tps62360_i2c_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(tps62360_of_match),
},
- .probe_new = tps62360_probe,
+ .probe = tps62360_probe,
.shutdown = tps62360_shutdown,
.id_table = tps62360_id,
};
diff --git a/drivers/regulator/tps6286x-regulator.c b/drivers/regulator/tps6286x-regulator.c
index f92e7649d0a0..b1c4b5120745 100644
--- a/drivers/regulator/tps6286x-regulator.c
+++ b/drivers/regulator/tps6286x-regulator.c
@@ -150,7 +150,7 @@ static struct i2c_driver tps6286x_regulator_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(tps6286x_dt_ids),
},
- .probe_new = tps6286x_i2c_probe,
+ .probe = tps6286x_i2c_probe,
.id_table = tps6286x_i2c_id,
};
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index d87cac63f346..d5757fd9a65b 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -337,7 +337,7 @@ static struct i2c_driver tps_65023_i2c_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
.of_match_table = of_match_ptr(tps65023_of_match),
},
- .probe_new = tps_65023_probe,
+ .probe = tps_65023_probe,
.id_table = tps_65023_id,
};
diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c
index d4b02ee791d1..a06f5f2d7932 100644
--- a/drivers/regulator/tps65132-regulator.c
+++ b/drivers/regulator/tps65132-regulator.c
@@ -272,7 +272,7 @@ static struct i2c_driver tps65132_i2c_driver = {
.name = "tps65132",
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe_new = tps65132_probe,
+ .probe = tps65132_probe,
.id_table = tps65132_id,
};
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 6aa8f243b30c..010f7b2c527c 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -143,9 +143,6 @@ config RESET_NPCM
This enables the reset controller driver for Nuvoton NPCM
BMC SoCs.
-config RESET_OXNAS
- bool
-
config RESET_PISTACHIO
bool "Pistachio Reset Driver"
depends on MIPS || COMPILE_TEST
@@ -154,7 +151,8 @@ config RESET_PISTACHIO
config RESET_POLARFIRE_SOC
bool "Microchip PolarFire SoC (MPFS) Reset Driver"
- depends on AUXILIARY_BUS && MCHP_CLK_MPFS
+ depends on MCHP_CLK_MPFS
+ select AUXILIARY_BUS
default MCHP_CLK_MPFS
help
This driver supports peripheral reset for the Microchip PolarFire SoC
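
[Editor's note] AUXILIARY_BUS is a hidden Kconfig symbol with no user prompt, so a plain "depends on" can never be satisfied from menuconfig alone; drivers that need the auxiliary bus are expected to select it. The same depends-to-select change is repeated for the StarFive JH7110 reset driver further down.
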
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 7fec5af6c964..acb6f824837d 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_RESET_MCHP_SPARX5) += reset-microchip-sparx5.o
obj-$(CONFIG_RESET_MESON) += reset-meson.o
obj-$(CONFIG_RESET_MESON_AUDIO_ARB) += reset-meson-audio-arb.o
obj-$(CONFIG_RESET_NPCM) += reset-npcm.o
-obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o
obj-$(CONFIG_RESET_POLARFIRE_SOC) += reset-mpfs.o
obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o
diff --git a/drivers/reset/reset-ath79.c b/drivers/reset/reset-ath79.c
index e48d8fcb3133..a8b8f5ea77ec 100644
--- a/drivers/reset/reset-ath79.c
+++ b/drivers/reset/reset-ath79.c
@@ -86,7 +86,6 @@ static int ath79_reset_restart_handler(struct notifier_block *nb,
static int ath79_reset_probe(struct platform_device *pdev)
{
struct ath79_reset *ath79_reset;
- struct resource *res;
int err;
ath79_reset = devm_kzalloc(&pdev->dev,
@@ -96,8 +95,7 @@ static int ath79_reset_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ath79_reset);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ath79_reset->base = devm_ioremap_resource(&pdev->dev, res);
+ ath79_reset->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ath79_reset->base))
return PTR_ERR(ath79_reset->base);
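
[Editor's note] The reset-driver hunks from here on repeat one conversion: devm_platform_ioremap_resource(pdev, index) folds the resource lookup and the managed ioremap into a single call. A sketch of the equivalence, with a hypothetical probe body (assumptions mine):

    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            void __iomem *base;

            /*
             * Equivalent to the removed two-step form:
             *   struct resource *res =
             *           platform_get_resource(pdev, IORESOURCE_MEM, 0);
             *   base = devm_ioremap_resource(&pdev->dev, res);
             */
            base = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            return 0;
    }
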
diff --git a/drivers/reset/reset-axs10x.c b/drivers/reset/reset-axs10x.c
index a854ef41364d..115f69e0db33 100644
--- a/drivers/reset/reset-axs10x.c
+++ b/drivers/reset/reset-axs10x.c
@@ -44,14 +44,12 @@ static const struct reset_control_ops axs10x_reset_ops = {
static int axs10x_reset_probe(struct platform_device *pdev)
{
struct axs10x_rst *rst;
- struct resource *mem;
rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL);
if (!rst)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rst->regs_rst = devm_ioremap_resource(&pdev->dev, mem);
+ rst->regs_rst = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rst->regs_rst))
return PTR_ERR(rst->regs_rst);
diff --git a/drivers/reset/reset-brcmstb-rescal.c b/drivers/reset/reset-brcmstb-rescal.c
index 433fa0c40e47..823317772bac 100644
--- a/drivers/reset/reset-brcmstb-rescal.c
+++ b/drivers/reset/reset-brcmstb-rescal.c
@@ -66,14 +66,12 @@ static const struct reset_control_ops brcm_rescal_reset_ops = {
static int brcm_rescal_reset_probe(struct platform_device *pdev)
{
struct brcm_rescal_reset *data;
- struct resource *res;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/reset/reset-hsdk.c b/drivers/reset/reset-hsdk.c
index 4c7b8647b49c..98460e08752c 100644
--- a/drivers/reset/reset-hsdk.c
+++ b/drivers/reset/reset-hsdk.c
@@ -92,19 +92,16 @@ static const struct reset_control_ops hsdk_reset_ops = {
static int hsdk_reset_probe(struct platform_device *pdev)
{
struct hsdk_rst *rst;
- struct resource *mem;
rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL);
if (!rst)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rst->regs_ctl = devm_ioremap_resource(&pdev->dev, mem);
+ rst->regs_ctl = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rst->regs_ctl))
return PTR_ERR(rst->regs_ctl);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- rst->regs_rst = devm_ioremap_resource(&pdev->dev, mem);
+ rst->regs_rst = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(rst->regs_rst))
return PTR_ERR(rst->regs_rst);
diff --git a/drivers/reset/reset-lpc18xx.c b/drivers/reset/reset-lpc18xx.c
index 35d8dd4cccfc..36ec95518905 100644
--- a/drivers/reset/reset-lpc18xx.c
+++ b/drivers/reset/reset-lpc18xx.c
@@ -139,7 +139,6 @@ static const struct reset_control_ops lpc18xx_rgu_ops = {
static int lpc18xx_rgu_probe(struct platform_device *pdev)
{
struct lpc18xx_rgu_data *rc;
- struct resource *res;
u32 fcclk, firc;
int ret;
@@ -147,8 +146,7 @@ static int lpc18xx_rgu_probe(struct platform_device *pdev)
if (!rc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rc->base = devm_ioremap_resource(&pdev->dev, res);
+ rc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rc->base))
return PTR_ERR(rc->base);
diff --git a/drivers/reset/reset-meson-audio-arb.c b/drivers/reset/reset-meson-audio-arb.c
index 1dc06e08a8da..6a3f6a6a3bbf 100644
--- a/drivers/reset/reset-meson-audio-arb.c
+++ b/drivers/reset/reset-meson-audio-arb.c
@@ -151,11 +151,8 @@ static int meson_audio_arb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, arb);
arb->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(arb->clk)) {
- if (PTR_ERR(arb->clk) != -EPROBE_DEFER)
- dev_err(dev, "failed to get clock\n");
- return PTR_ERR(arb->clk);
- }
+ if (IS_ERR(arb->clk))
+ return dev_err_probe(dev, PTR_ERR(arb->clk), "failed to get clock\n");
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
arb->regs = devm_ioremap_resource(dev, res);
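
[Editor's note] dev_err_probe(dev, err, fmt, ...) returns err after logging, so it can sit directly in a return statement; for -EPROBE_DEFER it records the message for the devices_deferred debugfs file instead of printing at error level, which is why the explicit -EPROBE_DEFER test above can simply be dropped.
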
diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c
index 26dc54778615..13878ca2779d 100644
--- a/drivers/reset/reset-meson.c
+++ b/drivers/reset/reset-meson.c
@@ -116,14 +116,12 @@ MODULE_DEVICE_TABLE(of, meson_reset_dt_ids);
static int meson_reset_probe(struct platform_device *pdev)
{
struct meson_reset *data;
- struct resource *res;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ data->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->reg_base))
return PTR_ERR(data->reg_base);
diff --git a/drivers/reset/reset-oxnas.c b/drivers/reset/reset-oxnas.c
deleted file mode 100644
index 8209f922dc16..000000000000
--- a/drivers/reset/reset-oxnas.c
+++ /dev/null
@@ -1,114 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Oxford Semiconductor Reset Controller driver
- *
- * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
- * Copyright (C) 2014 Ma Haijun <mahaijuns@gmail.com>
- * Copyright (C) 2009 Oxford Semiconductor Ltd
- */
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/reset-controller.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/regmap.h>
-#include <linux/mfd/syscon.h>
-
-/* Regmap offsets */
-#define RST_SET_REGOFFSET 0x34
-#define RST_CLR_REGOFFSET 0x38
-
-struct oxnas_reset {
- struct regmap *regmap;
- struct reset_controller_dev rcdev;
-};
-
-static int oxnas_reset_reset(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct oxnas_reset *data =
- container_of(rcdev, struct oxnas_reset, rcdev);
-
- regmap_write(data->regmap, RST_SET_REGOFFSET, BIT(id));
- msleep(50);
- regmap_write(data->regmap, RST_CLR_REGOFFSET, BIT(id));
-
- return 0;
-}
-
-static int oxnas_reset_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct oxnas_reset *data =
- container_of(rcdev, struct oxnas_reset, rcdev);
-
- regmap_write(data->regmap, RST_SET_REGOFFSET, BIT(id));
-
- return 0;
-}
-
-static int oxnas_reset_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
-{
- struct oxnas_reset *data =
- container_of(rcdev, struct oxnas_reset, rcdev);
-
- regmap_write(data->regmap, RST_CLR_REGOFFSET, BIT(id));
-
- return 0;
-}
-
-static const struct reset_control_ops oxnas_reset_ops = {
- .reset = oxnas_reset_reset,
- .assert = oxnas_reset_assert,
- .deassert = oxnas_reset_deassert,
-};
-
-static const struct of_device_id oxnas_reset_dt_ids[] = {
- { .compatible = "oxsemi,ox810se-reset", },
- { .compatible = "oxsemi,ox820-reset", },
- { /* sentinel */ },
-};
-
-static int oxnas_reset_probe(struct platform_device *pdev)
-{
- struct oxnas_reset *data;
- struct device *parent;
-
- parent = pdev->dev.parent;
- if (!parent) {
- dev_err(&pdev->dev, "no parent\n");
- return -ENODEV;
- }
-
- data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->regmap = syscon_node_to_regmap(parent->of_node);
- if (IS_ERR(data->regmap)) {
- dev_err(&pdev->dev, "failed to get parent regmap\n");
- return PTR_ERR(data->regmap);
- }
-
- platform_set_drvdata(pdev, data);
-
- data->rcdev.owner = THIS_MODULE;
- data->rcdev.nr_resets = 32;
- data->rcdev.ops = &oxnas_reset_ops;
- data->rcdev.of_node = pdev->dev.of_node;
-
- return devm_reset_controller_register(&pdev->dev, &data->rcdev);
-}
-
-static struct platform_driver oxnas_reset_driver = {
- .probe = oxnas_reset_probe,
- .driver = {
- .name = "oxnas-reset",
- .of_match_table = oxnas_reset_dt_ids,
- },
-};
-builtin_platform_driver(oxnas_reset_driver);
diff --git a/drivers/reset/starfive/Kconfig b/drivers/reset/starfive/Kconfig
index 1fa706a2c3dc..d832339f61bc 100644
--- a/drivers/reset/starfive/Kconfig
+++ b/drivers/reset/starfive/Kconfig
@@ -13,7 +13,8 @@ config RESET_STARFIVE_JH7100
config RESET_STARFIVE_JH7110
bool "StarFive JH7110 Reset Driver"
- depends on AUXILIARY_BUS && CLK_STARFIVE_JH7110_SYS
+ depends on CLK_STARFIVE_JH7110_SYS
+ select AUXILIARY_BUS
select RESET_STARFIVE_JH71X0
default ARCH_STARFIVE
help
diff --git a/drivers/reset/sti/Kconfig b/drivers/reset/sti/Kconfig
index 9455e1c7a5aa..a2622e146b8b 100644
--- a/drivers/reset/sti/Kconfig
+++ b/drivers/reset/sti/Kconfig
@@ -1,11 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
if ARCH_STI
-config STI_RESET_SYSCFG
- bool
-
config STIH407_RESET
bool
- select STI_RESET_SYSCFG
endif
diff --git a/drivers/reset/sti/Makefile b/drivers/reset/sti/Makefile
index 3eb30f7e8e3d..5e833496cee3 100644
--- a/drivers/reset/sti/Makefile
+++ b/drivers/reset/sti/Makefile
@@ -1,4 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_STI_RESET_SYSCFG) += reset-syscfg.o
-
-obj-$(CONFIG_STIH407_RESET) += reset-stih407.o
+obj-$(CONFIG_STIH407_RESET) += reset-stih407.o reset-syscfg.o
diff --git a/drivers/reset/sti/reset-syscfg.c b/drivers/reset/sti/reset-syscfg.c
index b4b46e0f207e..c1ba04f6f155 100644
--- a/drivers/reset/sti/reset-syscfg.c
+++ b/drivers/reset/sti/reset-syscfg.c
@@ -64,22 +64,12 @@ static int syscfg_reset_program_hw(struct reset_controller_dev *rcdev,
return err;
if (ch->ack) {
- unsigned long timeout = jiffies + msecs_to_jiffies(1000);
u32 ack_val;
- while (true) {
- err = regmap_field_read(ch->ack, &ack_val);
- if (err)
- return err;
-
- if (ack_val == ctrl_val)
- break;
-
- if (time_after(jiffies, timeout))
- return -ETIME;
-
- cpu_relax();
- }
+ err = regmap_field_read_poll_timeout(ch->ack, ack_val, (ack_val == ctrl_val),
+ 100, USEC_PER_SEC);
+ if (err)
+ return err;
}
return 0;
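
[Editor's note] regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) replaces the removed open-coded loop: it re-reads the field into ack_val, returns 0 once the condition holds, propagates any read error, and gives up after timeout_us (USEC_PER_SEC here, matching the old one-second budget) while sleeping roughly sleep_us between reads instead of spinning on cpu_relax(). A roughly equivalent open-coded form (sketch, assumptions mine); note the macro returns -ETIMEDOUT where the old loop returned -ETIME:

    unsigned long timeout = jiffies + usecs_to_jiffies(USEC_PER_SEC);

    for (;;) {
            err = regmap_field_read(ch->ack, &ack_val);
            if (err)
                    return err;
            if (ack_val == ctrl_val)
                    break;
            if (time_after(jiffies, timeout))
                    return -ETIMEDOUT;      /* old loop: -ETIME */
            usleep_range(100, 150);         /* sleep_us = 100 */
    }
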
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index ca85bddb582b..b18dd4591492 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -415,9 +415,8 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
hostdata->flags = flags;
INIT_WORK(&hostdata->main_task, NCR5380_main);
- hostdata->work_q = alloc_workqueue("ncr5380_%d",
- WQ_UNBOUND | WQ_MEM_RECLAIM,
- 1, instance->host_no);
+ hostdata->work_q = alloc_ordered_workqueue("ncr5380_%d",
+ WQ_MEM_RECLAIM, instance->host_no);
if (!hostdata->work_q)
return -ENOMEM;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f0bc8bbb3938..222b0cff731c 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -294,9 +294,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
if (shost->transportt->create_work_queue) {
snprintf(shost->work_q_name, sizeof(shost->work_q_name),
"scsi_wq_%d", shost->host_no);
- shost->work_q = alloc_workqueue("%s",
- WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
- 1, shost->work_q_name);
+ shost->work_q = alloc_ordered_workqueue("%s",
+ WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM,
+ shost->work_q_name);
if (!shost->work_q) {
error = -EINVAL;
@@ -510,9 +510,9 @@ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int priv
goto fail;
}
- shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
- 1, shost->host_no);
+ shost->tmf_work_q = alloc_ordered_workqueue("scsi_tmf_%d",
+ WQ_MEM_RECLAIM | WQ_SYSFS,
+ shost->host_no);
if (!shost->tmf_work_q) {
shost_printk(KERN_WARNING, shost,
"failed to create tmf workq\n");
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 0fda8905eabd..b31680d41f26 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2907,9 +2907,8 @@ struct Scsi_Host *iscsi_host_alloc(const struct scsi_host_template *sht,
ihost = shost_priv(shost);
if (xmit_can_sleep) {
- ihost->workq = alloc_workqueue("iscsi_q_%d",
- WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
- 1, shost->host_no);
+ ihost->workq = alloc_ordered_workqueue("iscsi_q_%d",
+ WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM, shost->host_no);
if (!ihost->workq)
goto free_host;
}
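
[Editor's note] The workqueue hunks in NCR5380.c, hosts.c and libiscsi.c are one conversion: an unbound workqueue created with max_active == 1 is by definition ordered (work items run strictly one at a time, in queueing order), and alloc_ordered_workqueue() states that guarantee explicitly while dropping the now-implied WQ_UNBOUND flag and the max_active argument. Sketch with a hypothetical name:

    /* before: ordering implied by WQ_UNBOUND + max_active == 1 */
    wq = alloc_workqueue("foo_%d", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, id);

    /* after: same serialization guarantee, spelled out */
    wq = alloc_ordered_workqueue("foo_%d", WQ_MEM_RECLAIM, id);
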
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index dc670304f181..adcac57aaee6 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1198,37 +1198,37 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
sas_route_char(child, child_phy));
}
+static bool sas_eeds_valid(struct domain_device *parent,
+ struct domain_device *child)
+{
+ struct sas_discovery *disc = &parent->port->disc;
+
+ return (SAS_ADDR(disc->eeds_a) == SAS_ADDR(parent->sas_addr) ||
+ SAS_ADDR(disc->eeds_a) == SAS_ADDR(child->sas_addr)) &&
+ (SAS_ADDR(disc->eeds_b) == SAS_ADDR(parent->sas_addr) ||
+ SAS_ADDR(disc->eeds_b) == SAS_ADDR(child->sas_addr));
+}
+
static int sas_check_eeds(struct domain_device *child,
- struct ex_phy *parent_phy,
- struct ex_phy *child_phy)
+ struct ex_phy *parent_phy,
+ struct ex_phy *child_phy)
{
int res = 0;
struct domain_device *parent = child->parent;
+ struct sas_discovery *disc = &parent->port->disc;
- if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) {
+ if (SAS_ADDR(disc->fanout_sas_addr) != 0) {
res = -ENODEV;
pr_warn("edge ex %016llx phy S:%02d <--> edge ex %016llx phy S:%02d, while there is a fanout ex %016llx\n",
SAS_ADDR(parent->sas_addr),
parent_phy->phy_id,
SAS_ADDR(child->sas_addr),
child_phy->phy_id,
- SAS_ADDR(parent->port->disc.fanout_sas_addr));
- } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) {
- memcpy(parent->port->disc.eeds_a, parent->sas_addr,
- SAS_ADDR_SIZE);
- memcpy(parent->port->disc.eeds_b, child->sas_addr,
- SAS_ADDR_SIZE);
- } else if (((SAS_ADDR(parent->port->disc.eeds_a) ==
- SAS_ADDR(parent->sas_addr)) ||
- (SAS_ADDR(parent->port->disc.eeds_a) ==
- SAS_ADDR(child->sas_addr)))
- &&
- ((SAS_ADDR(parent->port->disc.eeds_b) ==
- SAS_ADDR(parent->sas_addr)) ||
- (SAS_ADDR(parent->port->disc.eeds_b) ==
- SAS_ADDR(child->sas_addr))))
- ;
- else {
+ SAS_ADDR(disc->fanout_sas_addr));
+ } else if (SAS_ADDR(disc->eeds_a) == 0) {
+ memcpy(disc->eeds_a, parent->sas_addr, SAS_ADDR_SIZE);
+ memcpy(disc->eeds_b, child->sas_addr, SAS_ADDR_SIZE);
+ } else if (!sas_eeds_valid(parent, child)) {
res = -ENODEV;
pr_warn("edge ex %016llx phy%02d <--> edge ex %016llx phy%02d link forms a third EEDS!\n",
SAS_ADDR(parent->sas_addr),
@@ -1240,11 +1240,56 @@ static int sas_check_eeds(struct domain_device *child,
return res;
}
-/* Here we spill over 80 columns. It is intentional.
- */
-static int sas_check_parent_topology(struct domain_device *child)
+static int sas_check_edge_expander_topo(struct domain_device *child,
+ struct ex_phy *parent_phy)
+{
+ struct expander_device *child_ex = &child->ex_dev;
+ struct expander_device *parent_ex = &child->parent->ex_dev;
+ struct ex_phy *child_phy;
+
+ child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
+
+ if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
+ if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
+ child_phy->routing_attr != TABLE_ROUTING)
+ goto error;
+ } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) {
+ if (child_phy->routing_attr == SUBTRACTIVE_ROUTING)
+ return sas_check_eeds(child, parent_phy, child_phy);
+ else if (child_phy->routing_attr != TABLE_ROUTING)
+ goto error;
+ } else if (parent_phy->routing_attr == TABLE_ROUTING) {
+ if (child_phy->routing_attr != SUBTRACTIVE_ROUTING &&
+ (child_phy->routing_attr != TABLE_ROUTING ||
+ !child_ex->t2t_supp || !parent_ex->t2t_supp))
+ goto error;
+ }
+
+ return 0;
+error:
+ sas_print_parent_topology_bug(child, parent_phy, child_phy);
+ return -ENODEV;
+}
+
+static int sas_check_fanout_expander_topo(struct domain_device *child,
+ struct ex_phy *parent_phy)
{
struct expander_device *child_ex = &child->ex_dev;
+ struct ex_phy *child_phy;
+
+ child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
+
+ if (parent_phy->routing_attr == TABLE_ROUTING &&
+ child_phy->routing_attr == SUBTRACTIVE_ROUTING)
+ return 0;
+
+ sas_print_parent_topology_bug(child, parent_phy, child_phy);
+
+ return -ENODEV;
+}
+
+static int sas_check_parent_topology(struct domain_device *child)
+{
struct expander_device *parent_ex;
int i;
int res = 0;
@@ -1259,7 +1304,6 @@ static int sas_check_parent_topology(struct domain_device *child)
for (i = 0; i < parent_ex->num_phys; i++) {
struct ex_phy *parent_phy = &parent_ex->ex_phy[i];
- struct ex_phy *child_phy;
if (parent_phy->phy_state == PHY_VACANT ||
parent_phy->phy_state == PHY_NOT_PRESENT)
@@ -1268,40 +1312,14 @@ static int sas_check_parent_topology(struct domain_device *child)
if (!sas_phy_match_dev_addr(child, parent_phy))
continue;
- child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
-
switch (child->parent->dev_type) {
case SAS_EDGE_EXPANDER_DEVICE:
- if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
- if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
- child_phy->routing_attr != TABLE_ROUTING) {
- sas_print_parent_topology_bug(child, parent_phy, child_phy);
- res = -ENODEV;
- }
- } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) {
- if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) {
- res = sas_check_eeds(child, parent_phy, child_phy);
- } else if (child_phy->routing_attr != TABLE_ROUTING) {
- sas_print_parent_topology_bug(child, parent_phy, child_phy);
- res = -ENODEV;
- }
- } else if (parent_phy->routing_attr == TABLE_ROUTING) {
- if (child_phy->routing_attr == SUBTRACTIVE_ROUTING ||
- (child_phy->routing_attr == TABLE_ROUTING &&
- child_ex->t2t_supp && parent_ex->t2t_supp)) {
- /* All good */;
- } else {
- sas_print_parent_topology_bug(child, parent_phy, child_phy);
- res = -ENODEV;
- }
- }
+ if (sas_check_edge_expander_topo(child, parent_phy))
+ res = -ENODEV;
break;
case SAS_FANOUT_EXPANDER_DEVICE:
- if (parent_phy->routing_attr != TABLE_ROUTING ||
- child_phy->routing_attr != SUBTRACTIVE_ROUTING) {
- sas_print_parent_topology_bug(child, parent_phy, child_phy);
+ if (sas_check_fanout_expander_topo(child, parent_phy))
res = -ENODEV;
- }
break;
default:
break;
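
[Editor's note] The libsas refactor above is behavior-preserving: the deeply nested if/else chain is split into sas_check_edge_expander_topo() and sas_check_fanout_expander_topo(), with the accepted combinations inverted into early goto-error exits. The routing rules it preserves, as a comment table (my summary, sketch only):

    /*
     * parent is an edge expander:
     *   child is fanout:     parent SUBTRACTIVE and child TABLE, else bug
     *   parent SUBTRACTIVE:  child SUBTRACTIVE -> sas_check_eeds()
     *                        child TABLE       -> ok
     *                        otherwise         -> bug
     *   parent TABLE:        child SUBTRACTIVE -> ok
     *                        child TABLE       -> ok iff both ends t2t_supp
     *                        otherwise         -> bug
     * parent is a fanout expander:
     *   parent TABLE and child SUBTRACTIVE -> ok, anything else -> bug
     */
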
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 3863a5341782..21c7ecd3ede5 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5858,8 +5858,8 @@ int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
module_param(lpfc_fabric_cgn_frequency, int, 0444);
MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq");
-int lpfc_acqe_cgn_frequency = 10; /* 10 sec default */
-module_param(lpfc_acqe_cgn_frequency, int, 0444);
+unsigned char lpfc_acqe_cgn_frequency = 10; /* 10 sec default */
+module_param(lpfc_acqe_cgn_frequency, byte, 0444);
MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq");
int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index b833b983e69d..d4e46a08f94d 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -134,7 +134,6 @@ void lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp);
void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb);
-int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp);
struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
void lpfc_disc_list_loopmap(struct lpfc_vport *);
void lpfc_disc_start(struct lpfc_vport *);
@@ -248,6 +247,7 @@ irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
irqreturn_t lpfc_sli4_intr_handler(int, void *);
irqreturn_t lpfc_sli4_hba_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id);
int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap,
uint32_t len);
@@ -664,7 +664,7 @@ extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
extern int lpfc_no_hba_reset_cnt;
extern unsigned long lpfc_no_hba_reset[];
-extern int lpfc_acqe_cgn_frequency;
+extern unsigned char lpfc_acqe_cgn_frequency;
extern int lpfc_fabric_cgn_frequency;
extern int lpfc_use_cgn_signal;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6a15f879e517..a3c8550e9985 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -5205,14 +5205,9 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
*
* This routine is the completion callback function to the Logout (LOGO)
* Accept (ACC) Response ELS command. This routine is invoked to indicate
- * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
- * release the ndlp if it has the last reference remaining (reference count
- * is 1). If succeeded (meaning ndlp released), it sets the iocb ndlp
- * field to NULL to inform the following lpfc_els_free_iocb() routine no
- * ndlp reference count needs to be decremented. Otherwise, the ndlp
- * reference use-count shall be decremented by the lpfc_els_free_iocb()
- * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
- * IOCB data structure.
+ * the completion of the LOGO process. If the node has transitioned to NPR,
+ * this routine unregisters the RPI if it is still registered. The
+ * lpfc_els_free_iocb() is invoked to release the IOCB data structure.
**/
static void
lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
@@ -5253,19 +5248,9 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(ndlp->nlp_last_elscmd == ELS_CMD_PLOGI))
goto out;
- /* NPort Recovery mode or node is just allocated */
- if (!lpfc_nlp_not_used(ndlp)) {
- /* A LOGO is completing and the node is in NPR state.
- * Just unregister the RPI because the node is still
- * required.
- */
+ if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
lpfc_unreg_rpi(vport, ndlp);
- } else {
- /* Indicate the node has already released, should
- * not reference to it from within lpfc_els_free_iocb.
- */
- cmdiocb->ndlp = NULL;
- }
+
}
out:
/*
@@ -5285,9 +5270,8 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* RPI (Remote Port Index) mailbox command to the @phba. It simply releases
* the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
* decrements the ndlp reference count held for this completion callback
- * function. After that, it invokes the lpfc_nlp_not_used() to check
- * whether there is only one reference left on the ndlp. If so, it will
- * perform one more decrement and trigger the release of the ndlp.
+ * function. After that, it invokes the lpfc_drop_node() routine to check
+ * whether it is appropriate to release the node.
**/
void
lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 5ba3a9ad9501..67bfdddb897c 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4333,13 +4333,14 @@ out:
/* If the node is not registered with the scsi or nvme
* transport, remove the fabric node. The failed reg_login
- * is terminal.
+ * is terminal and forces the removal of the last node
+ * reference.
*/
if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
spin_unlock_irq(&ndlp->lock);
- lpfc_nlp_not_used(ndlp);
+ lpfc_nlp_put(ndlp);
}
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
@@ -6704,25 +6705,6 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}
-/* This routine free's the specified nodelist if it is not in use
- * by any other discovery thread. This routine returns 1 if the
- * ndlp has been freed. A return value of 0 indicates the ndlp is
- * not yet been released.
- */
-int
-lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
-{
- lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
- "node not used: did:x%x flg:x%x refcnt:x%x",
- ndlp->nlp_DID, ndlp->nlp_flag,
- kref_read(&ndlp->kref));
-
- if (kref_read(&ndlp->kref) == 1)
- if (lpfc_nlp_put(ndlp))
- return 1;
- return 0;
-}
-
/**
* lpfc_fcf_inuse - Check if FCF can be unregistered.
* @phba: Pointer to hba context object.
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 58fa39c403a0..082f8a109e55 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -536,9 +536,9 @@ struct sli4_wcqe_xri_aborted {
/* completion queue entry structure for rqe completion */
struct lpfc_rcqe {
uint32_t word0;
-#define lpfc_rcqe_bindex_SHIFT 16
-#define lpfc_rcqe_bindex_MASK 0x0000FFF
-#define lpfc_rcqe_bindex_WORD word0
+#define lpfc_rcqe_iv_SHIFT 31
+#define lpfc_rcqe_iv_MASK 0x00000001
+#define lpfc_rcqe_iv_WORD word0
#define lpfc_rcqe_status_SHIFT 8
#define lpfc_rcqe_status_MASK 0x000000FF
#define lpfc_rcqe_status_WORD word0
@@ -546,6 +546,7 @@ struct lpfc_rcqe {
#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */
#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */
#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */
+#define FC_STATUS_RQ_DMA_FAILURE 0x14 /* DMA failure */
uint32_t word1;
#define lpfc_rcqe_fcf_id_v1_SHIFT 0
#define lpfc_rcqe_fcf_id_v1_MASK 0x0000003F
@@ -4813,8 +4814,8 @@ struct cmf_sync_wqe {
#define cmf_sync_cqid_WORD word11
uint32_t read_bytes;
uint32_t word13;
-#define cmf_sync_period_SHIFT 16
-#define cmf_sync_period_MASK 0x0000ffff
+#define cmf_sync_period_SHIFT 24
+#define cmf_sync_period_MASK 0x000000ff
#define cmf_sync_period_WORD word13
uint32_t word14;
uint32_t word15;
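
[Editor's note] The lpfc_rcqe change swaps the buffer-index field for a single-bit flag in bit 31 of word0, consumed later in this series as bf_get(lpfc_rcqe_iv, rcqe) in the new FC_STATUS_RQ_DMA_FAILURE handling; cmf_sync_period likewise shrinks from 16 bits to the top 8 bits of word13 to match the byte-sized module parameter. lpfc's bf_get() works off these SHIFT/MASK/WORD triplets, roughly (sketch):

    /*
     * #define bf_get(name, ptr) \
     *         ((((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
     */
    if (bf_get(lpfc_rcqe_iv, rcqe))         /* bit 31 of word0 set */
            /* no further recovery */;
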
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 867b4c788f08..088bd75fb5d7 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1279,7 +1279,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
/*
* lpfc_idle_stat_delay_work - idle_stat tracking
*
- * This routine tracks per-cq idle_stat and determines polling decisions.
+ * This routine tracks per-eq idle_stat and determines polling decisions.
*
* Return codes:
* None
@@ -1290,7 +1290,7 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
struct lpfc_hba *phba = container_of(to_delayed_work(work),
struct lpfc_hba,
idle_stat_delay_work);
- struct lpfc_queue *cq;
+ struct lpfc_queue *eq;
struct lpfc_sli4_hdw_queue *hdwq;
struct lpfc_idle_stat *idle_stat;
u32 i, idle_percent;
@@ -1306,10 +1306,10 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
for_each_present_cpu(i) {
hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
- cq = hdwq->io_cq;
+ eq = hdwq->hba_eq;
- /* Skip if we've already handled this cq's primary CPU */
- if (cq->chann != i)
+ /* Skip if we've already handled this eq's primary CPU */
+ if (eq->chann != i)
continue;
idle_stat = &phba->sli4_hba.idle_stat[i];
@@ -1333,9 +1333,9 @@ lpfc_idle_stat_delay_work(struct work_struct *work)
idle_percent = 100 - idle_percent;
if (idle_percent < 15)
- cq->poll_mode = LPFC_QUEUE_WORK;
+ eq->poll_mode = LPFC_QUEUE_WORK;
else
- cq->poll_mode = LPFC_IRQ_POLL;
+ eq->poll_mode = LPFC_THREADED_IRQ;
idle_stat->prev_idle = wall_idle;
idle_stat->prev_wall = wall;
@@ -4357,6 +4357,7 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
struct lpfc_sli4_hdw_queue *qp;
struct lpfc_io_buf *lpfc_cmd;
int idx, cnt;
+ unsigned long iflags;
qp = phba->sli4_hba.hdwq;
cnt = 0;
@@ -4371,12 +4372,13 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
lpfc_cmd->hdwq_no = idx;
lpfc_cmd->hdwq = qp;
lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
- spin_lock(&qp->io_buf_list_put_lock);
+ spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags);
list_add_tail(&lpfc_cmd->list,
&qp->lpfc_io_buf_list_put);
qp->put_io_bufs++;
qp->total_io_bufs++;
- spin_unlock(&qp->io_buf_list_put_lock);
+ spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
+ iflags);
}
}
return cnt;
@@ -13117,8 +13119,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
}
eqhdl->irq = rc;
- rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0,
- name, eqhdl);
+ rc = request_threaded_irq(eqhdl->irq,
+ &lpfc_sli4_hba_intr_handler,
+ &lpfc_sli4_hba_intr_handler_th,
+ IRQF_ONESHOT, name, eqhdl);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index adda70423c77..82730a89ecb5 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1893,38 +1893,38 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
pnvme_rport->port_id,
pnvme_fcreq);
- /* If the hba is getting reset, this flag is set. It is
- * cleared when the reset is complete and rings reestablished.
- */
- spin_lock_irqsave(&phba->hbalock, flags);
- /* driver queued commands are in process of being flushed */
- if (phba->hba_flag & HBA_IOQ_FLUSH) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "6139 Driver in reset cleanup - flushing "
- "NVME Req now. hba_flag x%x\n",
- phba->hba_flag);
- return;
- }
-
lpfc_nbuf = freqpriv->nvme_buf;
if (!lpfc_nbuf) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6140 NVME IO req has no matching lpfc nvme "
"io buffer. Skipping abort req.\n");
return;
} else if (!lpfc_nbuf->nvmeCmd) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6141 lpfc NVME IO req has no nvme_fcreq "
"io buffer. Skipping abort req.\n");
return;
}
- nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
/* Guard against IO completion being called at same time */
- spin_lock(&lpfc_nbuf->buf_lock);
+ spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags);
+
+ /* If the hba is getting reset, this flag is set. It is
+ * cleared when the reset is complete and rings reestablished.
+ */
+ spin_lock(&phba->hbalock);
+ /* driver queued commands are in process of being flushed */
+ if (phba->hba_flag & HBA_IOQ_FLUSH) {
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "6139 Driver in reset cleanup - flushing "
+ "NVME Req now. hba_flag x%x\n",
+ phba->hba_flag);
+ return;
+ }
+
+ nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
/*
* The lpfc_nbuf and the mapped nvme_fcreq in the driver's
@@ -1971,8 +1971,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
lpfc_nvme_abort_fcreq_cmpl);
- spin_unlock(&lpfc_nbuf->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
/* Make sure HBA is alive */
lpfc_issue_hb_tmo(phba);
@@ -1998,8 +1998,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
out_unlock:
- spin_unlock(&lpfc_nbuf->buf_lock);
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock(&phba->hbalock);
+ spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
return;
}
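
[Editor's note] The NVMe abort rework above is mostly a lock-ordering fix: the per-IO buf_lock is now taken first (with interrupts disabled) and phba->hbalock is nested inside it, and the HBA_IOQ_FLUSH check moves under both locks so reset cleanup cannot race with an abort that has already looked up the buffer. Unlock order mirrors acquisition:

    spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags); /* outer */
    spin_lock(&phba->hbalock);                      /* inner */
    /* ... inspect and abort the IO ... */
    spin_unlock(&phba->hbalock);
    spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags);
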
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index e989f130434e..49aa86c477c6 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4273,7 +4273,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
"x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
cmd->device->id, cmd->device->lun, cmd,
cmd->result, *lp, *(lp + 3),
- (u64)scsi_get_lba(cmd),
+ (cmd->device->sector_size) ?
+ (u64)scsi_get_lba(cmd) : 0,
cmd->retries, scsi_get_resid(cmd));
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8693578888f1..22708f66be64 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -82,7 +82,8 @@ static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
struct lpfc_queue *eq,
- struct lpfc_eqe *eqe);
+ struct lpfc_eqe *eqe,
+ enum lpfc_poll_mode poll_mode);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
@@ -629,7 +630,7 @@ lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
- uint8_t rearm)
+ u8 rearm, enum lpfc_poll_mode poll_mode)
{
struct lpfc_eqe *eqe;
int count = 0, consumed = 0;
@@ -639,7 +640,7 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
eqe = lpfc_sli4_eq_get(eq);
while (eqe) {
- lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
+ lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode);
__lpfc_sli4_consume_eqe(phba, eq, eqe);
consumed++;
@@ -1931,7 +1932,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
unsigned long iflags;
u32 ret_val;
u32 atot, wtot, max;
- u16 warn_sync_period = 0;
+ u8 warn_sync_period = 0;
/* First address any alarm / warning activity */
atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
@@ -7957,7 +7958,7 @@ out_rdf:
* lpfc_init_idle_stat_hb - Initialize idle_stat tracking
* @phba: pointer to lpfc hba data structure.
*
- * This routine initializes the per-cq idle_stat to dynamically dictate
+ * This routine initializes the per-eq idle_stat to dynamically dictate
* polling decisions.
*
* Return codes:
@@ -7967,16 +7968,16 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
{
int i;
struct lpfc_sli4_hdw_queue *hdwq;
- struct lpfc_queue *cq;
+ struct lpfc_queue *eq;
struct lpfc_idle_stat *idle_stat;
u64 wall;
for_each_present_cpu(i) {
hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
- cq = hdwq->io_cq;
+ eq = hdwq->hba_eq;
- /* Skip if we've already handled this cq's primary CPU */
- if (cq->chann != i)
+ /* Skip if we've already handled this eq's primary CPU */
+ if (eq->chann != i)
continue;
idle_stat = &phba->sli4_hba.idle_stat[i];
@@ -7985,13 +7986,14 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
idle_stat->prev_wall = wall;
if (phba->nvmet_support ||
- phba->cmf_active_mode != LPFC_CFG_OFF)
- cq->poll_mode = LPFC_QUEUE_WORK;
+ phba->cmf_active_mode != LPFC_CFG_OFF ||
+ phba->intr_type != MSIX)
+ eq->poll_mode = LPFC_QUEUE_WORK;
else
- cq->poll_mode = LPFC_IRQ_POLL;
+ eq->poll_mode = LPFC_THREADED_IRQ;
}
- if (!phba->nvmet_support)
+ if (!phba->nvmet_support && phba->intr_type == MSIX)
schedule_delayed_work(&phba->idle_stat_delay_work,
msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
@@ -9218,7 +9220,8 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
if (mbox_pending)
/* process and rearm the EQ */
- lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
+ lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
+ LPFC_QUEUE_WORK);
else
/* Always clear and re-arm the EQ */
sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
@@ -11254,7 +11257,8 @@ inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
* will be handled through a sched from polling timer
* function which is currently triggered every 1msec.
*/
- lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
+ lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM,
+ LPFC_QUEUE_WORK);
}
/**
@@ -14682,6 +14686,38 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
+ case FC_STATUS_RQ_DMA_FAILURE:
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2564 RQE DMA Error x%x, x%08x x%08x x%08x "
+ "x%08x\n",
+ status, rcqe->word0, rcqe->word1,
+ rcqe->word2, rcqe->word3);
+
+ /* If IV set, no further recovery */
+ if (bf_get(lpfc_rcqe_iv, rcqe))
+ break;
+
+ /* recycle consumed resource */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
+ dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
+ if (!dma_buf) {
+ hrq->RQ_no_buf_found++;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ break;
+ }
+ hrq->RQ_rcv_buf++;
+ hrq->RQ_buf_posted--;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_in_buf_free(phba, &dma_buf->dbuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2565 Unexpected RQE Status x%x, w0-3 x%08x "
+ "x%08x x%08x x%08x\n",
+ status, rcqe->word0, rcqe->word1,
+ rcqe->word2, rcqe->word3);
+ break;
}
out:
return workposted;
@@ -14803,7 +14839,6 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
* @cq: Pointer to CQ to be processed
* @handler: Routine to process each cqe
* @delay: Pointer to usdelay to set in case of rescheduling of the handler
- * @poll_mode: Polling mode we were called from
*
* This routine processes completion queue entries in a CQ. While a valid
* queue element is found, the handler is called. During processing checks
@@ -14821,8 +14856,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
static bool
__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
- struct lpfc_cqe *), unsigned long *delay,
- enum lpfc_poll_mode poll_mode)
+ struct lpfc_cqe *), unsigned long *delay)
{
struct lpfc_cqe *cqe;
bool workposted = false;
@@ -14863,10 +14897,6 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
arm = false;
}
- /* Note: complete the irq_poll softirq before rearming CQ */
- if (poll_mode == LPFC_IRQ_POLL)
- irq_poll_complete(&cq->iop);
-
/* Track the max number of CQEs processed in 1 EQ */
if (count > cq->CQ_max_cqe)
cq->CQ_max_cqe = count;
@@ -14916,17 +14946,17 @@ __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
case LPFC_MCQ:
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_sp_handle_mcqe,
- &delay, LPFC_QUEUE_WORK);
+ &delay);
break;
case LPFC_WCQ:
if (cq->subtype == LPFC_IO)
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_fp_handle_cqe,
- &delay, LPFC_QUEUE_WORK);
+ &delay);
else
workposted |= __lpfc_sli4_process_cq(phba, cq,
lpfc_sli4_sp_handle_cqe,
- &delay, LPFC_QUEUE_WORK);
+ &delay);
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -15203,6 +15233,38 @@ drop:
hrq->RQ_no_posted_buf++;
/* Post more buffers if possible */
break;
+ case FC_STATUS_RQ_DMA_FAILURE:
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2575 RQE DMA Error x%x, x%08x x%08x x%08x "
+ "x%08x\n",
+ status, rcqe->word0, rcqe->word1,
+ rcqe->word2, rcqe->word3);
+
+ /* If IV set, no further recovery */
+ if (bf_get(lpfc_rcqe_iv, rcqe))
+ break;
+
+ /* recycle consumed resource */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
+ dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
+ if (!dma_buf) {
+ hrq->RQ_no_buf_found++;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ break;
+ }
+ hrq->RQ_rcv_buf++;
+ hrq->RQ_buf_posted--;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_rq_buf_free(phba, &dma_buf->hbuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "2576 Unexpected RQE Status x%x, w0-3 x%08x "
+ "x%08x x%08x x%08x\n",
+ status, rcqe->word0, rcqe->word1,
+ rcqe->word2, rcqe->word3);
+ break;
}
out:
return workposted;
@@ -15271,45 +15333,64 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
}
/**
- * lpfc_sli4_sched_cq_work - Schedules cq work
- * @phba: Pointer to HBA context object.
- * @cq: Pointer to CQ
- * @cqid: CQ ID
- *
- * This routine checks the poll mode of the CQ corresponding to
- * cq->chann, then either schedules a softirq or queue_work to complete
- * cq work.
+ * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
+ * @cq: Pointer to CQ to be processed
*
- * queue_work path is taken if in NVMET mode, or if poll_mode is in
- * LPFC_QUEUE_WORK mode. Otherwise, softirq path is taken.
+ * This routine calls the cq processing routine with the handler for
+ * fast path CQEs.
*
+ * The CQ routine returns two values: the first is the calling status,
+ * which indicates whether work was queued to the background discovery
+ * thread. If true, the routine should wakeup the discovery thread;
+ * the second is the delay parameter. If non-zero, rather than rearming
+ * the CQ and yet another interrupt, the CQ handler should be queued so
+ * that it is processed in a subsequent polling action. The value of
+ * the delay indicates when to reschedule it.
**/
-static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
- struct lpfc_queue *cq, uint16_t cqid)
+static void
+__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
{
- int ret = 0;
+ struct lpfc_hba *phba = cq->phba;
+ unsigned long delay;
+ bool workposted = false;
+ int ret;
- switch (cq->poll_mode) {
- case LPFC_IRQ_POLL:
- /* CGN mgmt is mutually exclusive from softirq processing */
- if (phba->cmf_active_mode == LPFC_CFG_OFF) {
- irq_poll_sched(&cq->iop);
- break;
- }
- fallthrough;
- case LPFC_QUEUE_WORK:
- default:
+ /* process and rearm the CQ */
+ workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
+ &delay);
+
+ if (delay) {
if (is_kdump_kernel())
- ret = queue_work(phba->wq, &cq->irqwork);
+ ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
+ delay);
else
- ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
+ ret = queue_delayed_work_on(cq->chann, phba->wq,
+ &cq->sched_irqwork, delay);
if (!ret)
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0383 Cannot schedule queue work "
- "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
- cqid, cq->queue_id,
- raw_smp_processor_id());
+ "0367 Cannot schedule queue work "
+ "for cqid=%d on CPU %d\n",
+ cq->queue_id, cq->chann);
}
+
+ /* wake up worker thread if there are works to be done */
+ if (workposted)
+ lpfc_worker_wake_up(phba);
+}
+
+/**
+ * lpfc_sli4_hba_process_cq - fast-path work handler when started by
+ * interrupt
+ * @work: pointer to work element
+ *
+ * translates from the work handler and calls the fast-path handler.
+ **/
+static void
+lpfc_sli4_hba_process_cq(struct work_struct *work)
+{
+ struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
+
+ __lpfc_sli4_hba_process_cq(cq);
}
/**
@@ -15317,6 +15398,7 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
* @phba: Pointer to HBA context object.
* @eq: Pointer to the queue structure.
* @eqe: Pointer to fast-path event queue entry.
+ * @poll_mode: poll_mode to execute processing the cq.
*
* This routine process a event queue entry from the fast-path event queue.
* It will check the MajorCode and MinorCode to determine this is for a
@@ -15327,11 +15409,12 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
**/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
- struct lpfc_eqe *eqe)
+ struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode)
{
struct lpfc_queue *cq = NULL;
uint32_t qidx = eq->hdwq;
uint16_t cqid, id;
+ int ret;
if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -15391,70 +15474,25 @@ work_cq:
else
cq->isr_timestamp = 0;
#endif
- lpfc_sli4_sched_cq_work(phba, cq, cqid);
-}
-/**
- * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
- * @cq: Pointer to CQ to be processed
- * @poll_mode: Enum lpfc_poll_state to determine poll mode
- *
- * This routine calls the cq processing routine with the handler for
- * fast path CQEs.
- *
- * The CQ routine returns two values: the first is the calling status,
- * which indicates whether work was queued to the background discovery
- * thread. If true, the routine should wakeup the discovery thread;
- * the second is the delay parameter. If non-zero, rather than rearming
- * the CQ and yet another interrupt, the CQ handler should be queued so
- * that it is processed in a subsequent polling action. The value of
- * the delay indicates when to reschedule it.
- **/
-static void
-__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
- enum lpfc_poll_mode poll_mode)
-{
- struct lpfc_hba *phba = cq->phba;
- unsigned long delay;
- bool workposted = false;
- int ret = 0;
-
- /* process and rearm the CQ */
- workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
- &delay, poll_mode);
-
- if (delay) {
+ switch (poll_mode) {
+ case LPFC_THREADED_IRQ:
+ __lpfc_sli4_hba_process_cq(cq);
+ break;
+ case LPFC_QUEUE_WORK:
+ default:
if (is_kdump_kernel())
- ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
- delay);
+ ret = queue_work(phba->wq, &cq->irqwork);
else
- ret = queue_delayed_work_on(cq->chann, phba->wq,
- &cq->sched_irqwork, delay);
+ ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
if (!ret)
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0367 Cannot schedule queue work "
- "for cqid=%d on CPU %d\n",
- cq->queue_id, cq->chann);
+ "0383 Cannot schedule queue work "
+ "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
+ cqid, cq->queue_id,
+ raw_smp_processor_id());
+ break;
}
-
- /* wake up worker thread if there are works to be done */
- if (workposted)
- lpfc_worker_wake_up(phba);
-}
-
-/**
- * lpfc_sli4_hba_process_cq - fast-path work handler when started by
- * interrupt
- * @work: pointer to work element
- *
- * translates from the work handler and calls the fast-path handler.
- **/
-static void
-lpfc_sli4_hba_process_cq(struct work_struct *work)
-{
- struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
-
- __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
}
/**
@@ -15469,7 +15507,7 @@ lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
struct lpfc_queue *cq = container_of(to_delayed_work(work),
struct lpfc_queue, sched_irqwork);
- __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
+ __lpfc_sli4_hba_process_cq(cq);
}
/**
@@ -15495,8 +15533,9 @@ lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
* and returns for these events. This function is called without any lock
* held. It gets the hbalock to access and update SLI data structures.
*
- * This function returns IRQ_HANDLED when interrupt is handled else it
- * returns IRQ_NONE.
+ * This function returns IRQ_HANDLED when the interrupt is handled, IRQ_WAKE_THREAD
+ * when the interrupt is scheduled to be handled from a threaded irq context, or
+ * IRQ_NONE otherwise.
**/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
@@ -15505,8 +15544,8 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
struct lpfc_hba_eq_hdl *hba_eq_hdl;
struct lpfc_queue *fpeq;
unsigned long iflag;
- int ecount = 0;
int hba_eqidx;
+ int ecount = 0;
struct lpfc_eq_intr_info *eqi;
/* Get the driver's phba structure from the dev_id */
@@ -15535,30 +15574,41 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
}
- eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
- eqi->icnt++;
-
- fpeq->last_cpu = raw_smp_processor_id();
+ switch (fpeq->poll_mode) {
+ case LPFC_THREADED_IRQ:
+ /* CGN mgmt is mutually exclusive from irq processing */
+ if (phba->cmf_active_mode == LPFC_CFG_OFF)
+ return IRQ_WAKE_THREAD;
+ fallthrough;
+ case LPFC_QUEUE_WORK:
+ default:
+ eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
+ eqi->icnt++;
- if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
- fpeq->q_flag & HBA_EQ_DELAY_CHK &&
- phba->cfg_auto_imax &&
- fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
- phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
- lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
+ fpeq->last_cpu = raw_smp_processor_id();
- /* process and rearm the EQ */
- ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
+ if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
+ fpeq->q_flag & HBA_EQ_DELAY_CHK &&
+ phba->cfg_auto_imax &&
+ fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
+ phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
+ lpfc_sli4_mod_hba_eq_delay(phba, fpeq,
+ LPFC_MAX_AUTO_EQ_DELAY);
- if (unlikely(ecount == 0)) {
- fpeq->EQ_no_entry++;
- if (phba->intr_type == MSIX)
- /* MSI-X treated interrupt served as no EQ share INT */
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "0358 MSI-X interrupt with no EQE\n");
- else
- /* Non MSI-X treated on interrupt as EQ share INT */
- return IRQ_NONE;
+ /* process and rearm the EQ */
+ ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
+ LPFC_QUEUE_WORK);
+
+ if (unlikely(ecount == 0)) {
+ fpeq->EQ_no_entry++;
+ if (phba->intr_type == MSIX)
+ /* MSI-X treated interrupt served as no EQ share INT */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "0358 MSI-X interrupt with no EQE\n");
+ else
+ /* Non MSI-X treated on interrupt as EQ share INT */
+ return IRQ_NONE;
+ }
}
return IRQ_HANDLED;
@@ -16115,13 +16165,69 @@ out:
return status;
}
-static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
+/**
+ * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within
+ * threaded irq context.
+ *
+ * Returns
+ * IRQ_HANDLED - interrupt is handled
+ * IRQ_NONE - otherwise
+ **/
+irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id)
{
- struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
+ struct lpfc_hba *phba;
+ struct lpfc_hba_eq_hdl *hba_eq_hdl;
+ struct lpfc_queue *fpeq;
+ int ecount = 0;
+ int hba_eqidx;
+ struct lpfc_eq_intr_info *eqi;
- __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
+ /* Get the driver's phba structure from the dev_id */
+ hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
+ phba = hba_eq_hdl->phba;
+ hba_eqidx = hba_eq_hdl->idx;
- return 1;
+ if (unlikely(!phba))
+ return IRQ_NONE;
+ if (unlikely(!phba->sli4_hba.hdwq))
+ return IRQ_NONE;
+
+ /* Get to the EQ struct associated with this vector */
+ fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
+ if (unlikely(!fpeq))
+ return IRQ_NONE;
+
+ eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id());
+ eqi->icnt++;
+
+ fpeq->last_cpu = raw_smp_processor_id();
+
+ if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
+ fpeq->q_flag & HBA_EQ_DELAY_CHK &&
+ phba->cfg_auto_imax &&
+ fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
+ phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
+ lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
+
+ /* process and rearm the EQ */
+ ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
+ LPFC_THREADED_IRQ);
+
+ if (unlikely(ecount == 0)) {
+ fpeq->EQ_no_entry++;
+ if (phba->intr_type == MSIX)
+ /* MSI-X treated interrupt served as no EQ share INT */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3358 MSI-X interrupt with no EQE\n");
+ else
+ /* Non MSI-X treated on interrupt as EQ share INT */
+ return IRQ_NONE;
+ }
+ return IRQ_HANDLED;
}
/**
@@ -16265,8 +16371,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
if (cq->queue_id > phba->sli4_hba.cq_max)
phba->sli4_hba.cq_max = cq->queue_id;
-
- irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
out:
mempool_free(mbox, phba->mbox_mem_pool);
return status;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 2a0864e6d7cd..2541a8fba093 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -140,7 +140,7 @@ struct lpfc_rqb {
enum lpfc_poll_mode {
LPFC_QUEUE_WORK,
- LPFC_IRQ_POLL
+ LPFC_THREADED_IRQ,
};
struct lpfc_idle_stat {
@@ -279,8 +279,6 @@ struct lpfc_queue {
struct list_head _poll_list;
void **q_pgs; /* array to index entries per page */
-#define LPFC_IRQ_POLL_WEIGHT 256
- struct irq_poll iop;
enum lpfc_poll_mode poll_mode;
};
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c97411b0992e..5fda8ac6b883 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.11"
+#define LPFC_DRIVER_VERSION "14.2.0.12"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index 4d84d5bd173f..82b55e955730 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -2058,7 +2058,7 @@ int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle)
sas_expander = kzalloc(sizeof(struct mpi3mr_sas_node),
GFP_KERNEL);
if (!sas_expander)
- return -1;
+ return -ENOMEM;
sas_expander->handle = handle;
sas_expander->num_phys = expander_pg0.num_phys;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 8b9490011e36..2e886c1d867d 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -43,7 +43,8 @@
#include "pm8001_chips.h"
#include "pm80xx_hwi.h"
-static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING;
+static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING |
+ PM8001_EVENT_LOGGING | PM8001_INIT_LOGGING;
module_param(logging_level, ulong, 0644);
MODULE_PARM_DESC(logging_level, " bits for enabling logging info.");
@@ -666,7 +667,7 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
* Currently we just set the fixed SAS address to our HBA, for manufacture,
* it should read from the EEPROM
*/
-static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
+static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
{
u8 i, j;
u8 sas_add[8];
@@ -679,6 +680,12 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
struct pm8001_ioctl_payload payload;
u16 deviceid;
int rc;
+ unsigned long time_remaining;
+
+ if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
+ pm8001_dbg(pm8001_ha, FAIL, "controller is in fatal error state\n");
+ return -EIO;
+ }
pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
pm8001_ha->nvmd_completion = &completion;
@@ -703,16 +710,23 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
payload.offset = 0;
payload.func_specific = kzalloc(payload.rd_length, GFP_KERNEL);
if (!payload.func_specific) {
- pm8001_dbg(pm8001_ha, INIT, "mem alloc fail\n");
- return;
+ pm8001_dbg(pm8001_ha, FAIL, "mem alloc fail\n");
+ return -ENOMEM;
}
rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
if (rc) {
kfree(payload.func_specific);
- pm8001_dbg(pm8001_ha, INIT, "nvmd failed\n");
- return;
+ pm8001_dbg(pm8001_ha, FAIL, "nvmd failed\n");
+ return -EIO;
+ }
+ time_remaining = wait_for_completion_timeout(&completion,
+			msecs_to_jiffies(60 * 1000)); /* 1 min */
+ if (!time_remaining) {
+ kfree(payload.func_specific);
+ pm8001_dbg(pm8001_ha, FAIL, "get_nvmd_req timeout\n");
+ return -EIO;
}
- wait_for_completion(&completion);
+
for (i = 0, j = 0; i <= 7; i++, j++) {
if (pm8001_ha->chip_id == chip_8001) {
@@ -751,6 +765,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr,
SAS_ADDR_SIZE);
#endif
+ return 0;
}
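
The hunk above bounds what used to be an indefinite wait_for_completion() on
the NVMD read. A minimal sketch of the pattern, with hypothetical names:
wait_for_completion_timeout() returns 0 on timeout and the remaining jiffies
otherwise, so a zero result maps directly onto the new -EIO escape.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int demo_wait_for_nvmd(struct completion *done)
{
	unsigned long left;

	/* 0 means the 60 s budget expired before complete() ran */
	left = wait_for_completion_timeout(done, msecs_to_jiffies(60 * 1000));
	if (!left)
		return -EIO;

	return 0;
}
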
/*
@@ -1166,7 +1181,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
pm80xx_set_thermal_config(pm8001_ha);
}
- pm8001_init_sas_add(pm8001_ha);
+ if (pm8001_init_sas_add(pm8001_ha))
+ goto err_out_shost;
/* phy setting support for motherboard controller */
rc = pm8001_configure_phy_settings(pm8001_ha);
if (rc)
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index dc1f4d958e03..953572fc0d9e 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -71,6 +71,7 @@
#define PM8001_DEV_LOGGING 0x80 /* development message logging */
#define PM8001_DEVIO_LOGGING 0x100 /* development io message logging */
#define PM8001_IOERR_LOGGING 0x200 /* development io err message logging */
+#define PM8001_EVENT_LOGGING 0x400 /* HW event logging */
#define pm8001_info(HBA, fmt, ...) \
pr_info("%s:: %s %d: " fmt, \
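
The new PM8001_EVENT_LOGGING bit extends the existing bitmask scheme: each
pm8001_dbg() call site names a message class, and the message is printed only
when the matching bit is set in the logging_level module parameter. A rough
sketch of that gating idiom (hypothetical names, not the driver's macro):

#include <linux/printk.h>

#define DEMO_EVENT_LOGGING	0x400	/* mirrors PM8001_EVENT_LOGGING */

static unsigned long demo_logging_level = DEMO_EVENT_LOGGING;

#define demo_dbg(bit, fmt, ...)					\
do {								\
	if (demo_logging_level & (bit))				\
		pr_info("demo: " fmt, ##__VA_ARGS__);		\
} while (0)

/* usage: demo_dbg(DEMO_EVENT_LOGGING, "phy %d up\n", phy_id); */
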
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 9584cadc4201..39a12ee94a72 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -3239,9 +3239,9 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct pm8001_port *port = &pm8001_ha->port[port_id];
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
- pm8001_dbg(pm8001_ha, DEVIO,
- "port id %d, phy id %d link_rate %d portstate 0x%x\n",
- port_id, phy_id, link_rate, portstate);
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_SATA_PHY_UP phyid:%#x port_id:%#x link_rate:%d portstate:%#x\n",
+ phy_id, port_id, link_rate, portstate);
phy->port = port;
port->port_id = port_id;
@@ -3291,10 +3291,14 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
phy->phy_attached = 0;
switch (portstate) {
case PORT_VALID:
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_VALID\n",
+ phy_id, port_id);
break;
case PORT_INVALID:
- pm8001_dbg(pm8001_ha, MSG, " PortInvalid portID %d\n",
- port_id);
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_INVALID\n",
+ phy_id, port_id);
pm8001_dbg(pm8001_ha, MSG,
" Last phy Down and port invalid\n");
if (port_sata) {
@@ -3306,18 +3310,21 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
sas_phy_disconnected(&phy->sas_phy);
break;
case PORT_IN_RESET:
- pm8001_dbg(pm8001_ha, MSG, " Port In Reset portID %d\n",
- port_id);
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_IN_RESET\n",
+ phy_id, port_id);
break;
case PORT_NOT_ESTABLISHED:
- pm8001_dbg(pm8001_ha, MSG,
- " Phy Down and PORT_NOT_ESTABLISHED\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_NOT_ESTABLISHED\n",
+ phy_id, port_id);
port->port_attached = 0;
break;
case PORT_LOSTCOMM:
- pm8001_dbg(pm8001_ha, MSG, " Phy Down and PORT_LOSTCOMM\n");
- pm8001_dbg(pm8001_ha, MSG,
- " Last phy Down and port invalid\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_LOSTCOMM\n",
+ phy_id, port_id);
+ pm8001_dbg(pm8001_ha, MSG, " Last phy Down and port invalid\n");
if (port_sata) {
port->port_attached = 0;
phy->phy_type = 0;
@@ -3328,9 +3335,9 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
default:
port->port_attached = 0;
- pm8001_dbg(pm8001_ha, DEVIO,
- " Phy Down and(default) = 0x%x\n",
- portstate);
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate:%#x\n",
+ phy_id, port_id, portstate);
break;
}
@@ -3410,6 +3417,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
u8 phy_id =
(u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+ u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
u16 eventType =
(u16)((lr_status_evt_portid & 0x00FFFF00) >> 8);
u8 status =
@@ -3425,26 +3433,29 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
switch (eventType) {
case HW_EVENT_SAS_PHY_UP:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_SAS_PHY_UP phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
hw_event_sas_phy_up(pm8001_ha, piomb);
break;
case HW_EVENT_SATA_PHY_UP:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_PHY_UP\n");
hw_event_sata_phy_up(pm8001_ha, piomb);
break;
case HW_EVENT_SATA_SPINUP_HOLD:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_SATA_SPINUP_HOLD phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD,
GFP_ATOMIC);
break;
case HW_EVENT_PHY_DOWN:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n");
hw_event_phy_down(pm8001_ha, piomb);
- phy->phy_attached = 0;
phy->phy_state = PHY_LINK_DISABLE;
break;
case HW_EVENT_PORT_INVALID:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PORT_INVALID phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
@@ -3463,7 +3474,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
GFP_ATOMIC);
break;
case HW_EVENT_PHY_ERROR:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PHY_ERROR phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
sas_phy_disconnected(&phy->sas_phy);
phy->phy_attached = 0;
sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC);
@@ -3477,34 +3490,39 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_INVALID_DWORD:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_LINK_ERR_INVALID_DWORD\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_LINK_ERR_INVALID_DWORD phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
break;
case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_LINK_ERR_DISPARITY_ERROR\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_LINK_ERR_DISPARITY_ERROR phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_DISPARITY_ERROR,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_LINK_ERR_CODE_VIOLATION:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_LINK_ERR_CODE_VIOLATION\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_LINK_ERR_CODE_VIOLATION phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_CODE_VIOLATION,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_MALFUNCTION:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_MALFUNCTION\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_MALFUNCTION phyid:%#x\n", phy_id);
break;
case HW_EVENT_BROADCAST_SES:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_SES\n");
@@ -3515,25 +3533,30 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
GFP_ATOMIC);
break;
case HW_EVENT_INBOUND_CRC_ERROR:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_INBOUND_CRC_ERROR phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_INBOUND_CRC_ERROR,
port_id, phy_id, 0, 0);
break;
case HW_EVENT_HARD_RESET_RECEIVED:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_HARD_RESET_RECEIVED phyid:%#x\n", phy_id);
sas_notify_port_event(sas_phy, PORTE_HARD_RESET, GFP_ATOMIC);
break;
case HW_EVENT_ID_FRAME_TIMEOUT:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_ID_FRAME_TIMEOUT phyid:%#x\n", phy_id);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_LINK_ERR_PHY_RESET_FAILED phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
port_id, phy_id, 0, 0);
@@ -3543,13 +3566,16 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
GFP_ATOMIC);
break;
case HW_EVENT_PORT_RESET_TIMER_TMO:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PORT_RESET_TIMER_TMO phyid:%#x port_id:%#x portstate:%#x\n",
+ phy_id, port_id, portstate);
if (!pm8001_ha->phy[phy_id].reset_completion) {
pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
port_id, phy_id, 0, 0);
}
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
+ port->port_state = portstate;
sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
GFP_ATOMIC);
if (pm8001_ha->phy[phy_id].reset_completion) {
@@ -3560,8 +3586,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
- pm8001_dbg(pm8001_ha, MSG,
- "HW_EVENT_PORT_RECOVERY_TIMER_TMO\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PORT_RECOVERY_TIMER_TMO phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
pm80xx_hw_event_ack_req(pm8001_ha, 0,
HW_EVENT_PORT_RECOVERY_TIMER_TMO,
port_id, phy_id, 0, 0);
@@ -3575,24 +3602,32 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
break;
case HW_EVENT_PORT_RECOVER:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RECOVER\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PORT_RECOVER phyid:%#x port_id:%#x\n",
+ phy_id, port_id);
hw_event_port_recover(pm8001_ha, piomb);
break;
case HW_EVENT_PORT_RESET_COMPLETE:
- pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_COMPLETE\n");
+ pm8001_dbg(pm8001_ha, EVENT,
+ "HW_EVENT_PORT_RESET_COMPLETE phyid:%#x port_id:%#x portstate:%#x\n",
+ phy_id, port_id, portstate);
if (pm8001_ha->phy[phy_id].reset_completion) {
pm8001_ha->phy[phy_id].port_reset_status =
PORT_RESET_SUCCESS;
complete(pm8001_ha->phy[phy_id].reset_completion);
pm8001_ha->phy[phy_id].reset_completion = NULL;
}
+ phy->phy_attached = 1;
+ phy->phy_state = PHY_STATE_LINK_UP_SPCV;
+ port->port_state = portstate;
break;
case EVENT_BROADCAST_ASYNCH_EVENT:
pm8001_dbg(pm8001_ha, MSG, "EVENT_BROADCAST_ASYNCH_EVENT\n");
break;
default:
- pm8001_dbg(pm8001_ha, DEVIO, "Unknown event type 0x%x\n",
- eventType);
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "Unknown event portid:%d phyid:%d event:0x%x status:0x%x\n",
+ port_id, phy_id, eventType, status);
break;
}
return 0;
@@ -4726,6 +4761,9 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
SAS_ADDR_SIZE);
+ pm8001_dbg(pm8001_ha, INIT,
+ "register device req phy_id 0x%x port_id 0x%x\n", phy_id,
+ (port->port_id & 0xFF));
rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload,
sizeof(payload), 0);
if (rc)
@@ -4815,7 +4853,7 @@ static void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
payload.tag = cpu_to_le32(tag);
payload.ppc_phyid =
cpu_to_le32(((operation & 0xF) << 8) | (phyid & 0xFF));
- pm8001_dbg(pm8001_ha, INIT,
+ pm8001_dbg(pm8001_ha, DISC,
" phy profile command for phy %x ,length is %d\n",
le32_to_cpu(payload.ppc_phyid), length);
for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) {
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 3b64de81ea0d..2a31ddc99dde 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3041,9 +3041,8 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
* addresses of our queues
*/
if (!qedf->p_cpuq) {
- status = -EINVAL;
QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
- goto mem_alloc_failure;
+ return -EINVAL;
}
qedf->global_queues = kzalloc((sizeof(struct global_queue *)
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 70cfc94c3d43..b00222459607 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2750,6 +2750,7 @@ static void
qla2x00_terminate_rport_io(struct fc_rport *rport)
{
fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+ scsi_qla_host_t *vha;
if (!fcport)
return;
@@ -2759,9 +2760,12 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
return;
+ vha = fcport->vha;
if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
+ qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
+ 0, WAIT_TARGET);
return;
}
/*
@@ -2786,6 +2790,15 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
qla2x00_port_logout(fcport->vha, fcport);
}
}
+
+	/* check for any straggler IO left behind */
+	if (qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, 0, WAIT_TARGET)) {
+		ql_log(ql_log_warn, vha, 0x300b,
+		    "IO not returned. Resetting.\n");
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_chip_reset(vha);
+ }
}
static int
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index df5e5b7fdcfe..dfee3b41bdf1 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -465,6 +465,15 @@ static inline be_id_t port_id_to_be_id(port_id_t port_id)
return res;
}
+struct tmf_arg {
+ struct qla_qpair *qpair;
+ struct fc_port *fcport;
+ struct scsi_qla_host *vha;
+ u64 lun;
+ u32 flags;
+ uint8_t modifier;
+};
+
struct els_logo_payload {
uint8_t opcode;
uint8_t rsvd[3];
@@ -544,6 +553,10 @@ struct srb_iocb {
uint32_t data;
struct completion comp;
__le16 comp_status;
+
+ uint8_t modifier;
+ uint8_t vp_index;
+ uint16_t loop_id;
} tmf;
struct {
#define SRB_FXDISC_REQ_DMA_VALID BIT_0
@@ -647,6 +660,7 @@ struct srb_iocb {
#define SRB_SA_UPDATE 25
#define SRB_ELS_CMD_HST_NOLOGIN 26
#define SRB_SA_REPLACE 27
+#define SRB_MARKER 28
struct qla_els_pt_arg {
u8 els_opcode;
@@ -2528,6 +2542,7 @@ enum rscn_addr_format {
typedef struct fc_port {
struct list_head list;
struct scsi_qla_host *vha;
+ struct list_head tmf_pending;
unsigned int conf_compl_supported:1;
unsigned int deleted:2;
@@ -2548,6 +2563,8 @@ typedef struct fc_port {
unsigned int do_prli_nvme:1;
uint8_t nvme_flag;
+ uint8_t active_tmf;
+#define MAX_ACTIVE_TMF 8
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
@@ -5498,4 +5515,8 @@ struct ql_vnd_tgt_stats_resp {
_fp->disc_state, _fp->scan_state, _fp->loop_id, _fp->deleted, \
_fp->flags
+#define TMF_NOT_READY(_fcport) \
+ (!_fcport || IS_SESSION_DELETED(_fcport) || atomic_read(&_fcport->state) != FCS_ONLINE || \
+ !_fcport->vha->hw->flags.fw_started)
+
#endif
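
TMF_NOT_READY() centralizes the session checks that every task-management
path repeats: a NULL or deleted fcport, a session that is not FCS_ONLINE, or
firmware that has not started. A hypothetical caller sketch using the
driver's own return conventions:

static int demo_issue_tmf(fc_port_t *fcport)
{
	if (TMF_NOT_READY(fcport))
		return QLA_SUSPENDED;	/* caller retries after recovery */

	/* ... build and queue the task-management IOCB ... */
	return QLA_SUCCESS;
}
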
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 391c8b3623a6..ba7831f24734 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -69,7 +69,7 @@ extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *);
extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
+extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint64_t, uint32_t);
struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
enum qla_work_type);
extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index ec0423ec6681..0df6eae7324e 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1996,6 +1996,11 @@ qla2x00_tmf_iocb_timeout(void *data)
int rc, h;
unsigned long flags;
+ if (sp->type == SRB_MARKER) {
+ complete(&tmf->u.tmf.comp);
+ return;
+ }
+
rc = qla24xx_async_abort_cmd(sp, false);
if (rc) {
spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
@@ -2013,24 +2018,131 @@ qla2x00_tmf_iocb_timeout(void *data)
}
}
+static void qla_marker_sp_done(srb_t *sp, int res)
+{
+ struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
+ if (res != QLA_SUCCESS)
+ ql_dbg(ql_dbg_taskm, sp->vha, 0x8004,
+ "Async-marker fail hdl=%x portid=%06x ctrl=%x lun=%lld qp=%d.\n",
+ sp->handle, sp->fcport->d_id.b24, sp->u.iocb_cmd.u.tmf.flags,
+ sp->u.iocb_cmd.u.tmf.lun, sp->qpair->id);
+
+ sp->u.iocb_cmd.u.tmf.data = res;
+ complete(&tmf->u.tmf.comp);
+}
+
+#define START_SP_W_RETRIES(_sp, _rval) \
+{\
+ int cnt = 5; \
+ do { \
+ _rval = qla2x00_start_sp(_sp); \
+ if (_rval == EAGAIN) \
+ msleep(1); \
+ else \
+ break; \
+ cnt--; \
+ } while (cnt); \
+}
+
+/**
+ * qla26xx_marker - Send a marker IOCB and wait for its completion.
+ * @arg: pointer to argument list.
+ * It is assumed the caller will provide an fcport pointer and modifier.
+ */
+static int
+qla26xx_marker(struct tmf_arg *arg)
+{
+ struct scsi_qla_host *vha = arg->vha;
+ struct srb_iocb *tm_iocb;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+ fc_port_t *fcport = arg->fcport;
+
+ if (TMF_NOT_READY(arg->fcport)) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8039,
+ "FC port not ready for marker loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
+ fcport->loop_id, fcport->d_id.b24,
+ arg->modifier, arg->lun, arg->qpair->id);
+ return QLA_SUSPENDED;
+ }
+
+ /* ref: INIT */
+ sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_MARKER;
+ sp->name = "marker";
+ qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), qla_marker_sp_done);
+ sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;
+
+ tm_iocb = &sp->u.iocb_cmd;
+ init_completion(&tm_iocb->u.tmf.comp);
+ tm_iocb->u.tmf.modifier = arg->modifier;
+ tm_iocb->u.tmf.lun = arg->lun;
+ tm_iocb->u.tmf.loop_id = fcport->loop_id;
+ tm_iocb->u.tmf.vp_index = vha->vp_idx;
+
+ START_SP_W_RETRIES(sp, rval);
+
+ ql_dbg(ql_dbg_taskm, vha, 0x8006,
+ "Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b24,
+ arg->modifier, arg->lun, sp->qpair->id, rval);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x8031,
+ "Marker IOCB send failure (%x).\n", rval);
+ goto done_free_sp;
+ }
+
+ wait_for_completion(&tm_iocb->u.tmf.comp);
+ rval = tm_iocb->u.tmf.data;
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x8019,
+ "Marker failed hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b24,
+ arg->modifier, arg->lun, sp->qpair->id, rval);
+ }
+
+done_free_sp:
+ /* ref: INIT */
+ kref_put(&sp->cmd_kref, qla2x00_sp_release);
+done:
+ return rval;
+}
+
static void qla2x00_tmf_sp_done(srb_t *sp, int res)
{
struct srb_iocb *tmf = &sp->u.iocb_cmd;
+ if (res)
+ tmf->u.tmf.data = res;
complete(&tmf->u.tmf.comp);
}
-int
-qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
- uint32_t tag)
+static int
+__qla2x00_async_tm_cmd(struct tmf_arg *arg)
{
- struct scsi_qla_host *vha = fcport->vha;
+ struct scsi_qla_host *vha = arg->vha;
struct srb_iocb *tm_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
+ fc_port_t *fcport = arg->fcport;
+
+ if (TMF_NOT_READY(arg->fcport)) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8032,
+ "FC port not ready for TM command loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n",
+ fcport->loop_id, fcport->d_id.b24,
+ arg->modifier, arg->lun, arg->qpair->id);
+ return QLA_SUSPENDED;
+ }
+
/* ref: INIT */
- sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -2043,15 +2155,16 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
tm_iocb = &sp->u.iocb_cmd;
init_completion(&tm_iocb->u.tmf.comp);
- tm_iocb->u.tmf.flags = flags;
- tm_iocb->u.tmf.lun = lun;
+ tm_iocb->u.tmf.flags = arg->flags;
+ tm_iocb->u.tmf.lun = arg->lun;
+
+ START_SP_W_RETRIES(sp, rval);
ql_dbg(ql_dbg_taskm, vha, 0x802f,
- "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
- sp->handle, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ "Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b24,
+ arg->flags, arg->lun, sp->qpair->id, rval);
- rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
wait_for_completion(&tm_iocb->u.tmf.comp);
@@ -2063,15 +2176,8 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
"TM IOCB failed (%x).\n", rval);
}
- if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
- flags = tm_iocb->u.tmf.flags;
- lun = (uint16_t)tm_iocb->u.tmf.lun;
-
- /* Issue Marker IOCB */
- qla2x00_marker(vha, vha->hw->base_qpair,
- fcport->loop_id, lun,
- flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
- }
+ if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw))
+ rval = qla26xx_marker(arg);
done_free_sp:
/* ref: INIT */
@@ -2080,6 +2186,115 @@ done:
return rval;
}
+static void qla_put_tmf(fc_port_t *fcport)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ fcport->active_tmf--;
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+}
+
+static
+int qla_get_tmf(fc_port_t *fcport)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ int rc = 0;
+ LIST_HEAD(tmf_elem);
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ list_add_tail(&tmf_elem, &fcport->tmf_pending);
+
+ while (fcport->active_tmf >= MAX_ACTIVE_TMF) {
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ msleep(1);
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ if (TMF_NOT_READY(fcport)) {
+ ql_log(ql_log_warn, vha, 0x802c,
+ "Unable to acquire TM resource due to disruption.\n");
+ rc = EIO;
+ break;
+ }
+ if (fcport->active_tmf < MAX_ACTIVE_TMF &&
+ list_is_first(&tmf_elem, &fcport->tmf_pending))
+ break;
+ }
+
+ list_del(&tmf_elem);
+
+ if (!rc)
+ fcport->active_tmf++;
+
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ return rc;
+}
+
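
qla_get_tmf()/qla_put_tmf() above form a small counting gate: each waiter
links a stack-allocated list node onto tmf_pending, and once active_tmf hits
MAX_ACTIVE_TMF a waiter is admitted only when a slot frees and its node is
first in line, keeping admissions FIFO-fair. A generic sketch of the same
shape (hypothetical names, abort handling trimmed):

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_gate {
	spinlock_t lock;
	int active;
	int max_active;
	struct list_head waiters;
};

static void demo_gate_enter(struct demo_gate *g)
{
	LIST_HEAD(me);		/* stack node marks our place in line */
	unsigned long flags;

	spin_lock_irqsave(&g->lock, flags);
	list_add_tail(&me, &g->waiters);

	while (g->active >= g->max_active) {
		spin_unlock_irqrestore(&g->lock, flags);
		msleep(1);
		spin_lock_irqsave(&g->lock, flags);
		if (g->active < g->max_active &&
		    list_is_first(&me, &g->waiters))
			break;
	}

	list_del(&me);
	g->active++;
	spin_unlock_irqrestore(&g->lock, flags);
}

static void demo_gate_exit(struct demo_gate *g)
{
	unsigned long flags;

	spin_lock_irqsave(&g->lock, flags);
	g->active--;
	spin_unlock_irqrestore(&g->lock, flags);
}
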
+int
+qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
+ uint32_t tag)
+{
+ struct scsi_qla_host *vha = fcport->vha;
+ struct qla_qpair *qpair;
+ struct tmf_arg a;
+ int i, rval = QLA_SUCCESS;
+
+ if (TMF_NOT_READY(fcport))
+ return QLA_SUSPENDED;
+
+ a.vha = fcport->vha;
+ a.fcport = fcport;
+ a.lun = lun;
+ if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
+ a.modifier = MK_SYNC_ID_LUN;
+
+ if (qla_get_tmf(fcport))
+ return QLA_FUNCTION_FAILED;
+ } else {
+ a.modifier = MK_SYNC_ID;
+ }
+
+ if (vha->hw->mqenable) {
+ for (i = 0; i < vha->hw->num_qpairs; i++) {
+ qpair = vha->hw->queue_pair_map[i];
+ if (!qpair)
+ continue;
+
+ if (TMF_NOT_READY(fcport)) {
+ ql_log(ql_log_warn, vha, 0x8026,
+ "Unable to send TM due to disruption.\n");
+ rval = QLA_SUSPENDED;
+ break;
+ }
+
+ a.qpair = qpair;
+ a.flags = flags|TCF_NOTMCMD_TO_TARGET;
+ rval = __qla2x00_async_tm_cmd(&a);
+ if (rval)
+ break;
+ }
+ }
+
+ if (rval)
+ goto bailout;
+
+ a.qpair = vha->hw->base_qpair;
+ a.flags = flags;
+ rval = __qla2x00_async_tm_cmd(&a);
+
+bailout:
+ if (a.modifier == MK_SYNC_ID_LUN)
+ qla_put_tmf(fcport);
+
+ return rval;
+}
+
int
qla24xx_async_abort_command(srb_t *sp)
{
@@ -5291,6 +5506,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
+ INIT_LIST_HEAD(&fcport->tmf_pending);
INIT_LIST_HEAD(&fcport->sess_cmd_list);
spin_lock_init(&fcport->sess_cmd_lock);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index b9b3e6f80ea9..6acfdcc48b16 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -522,21 +522,25 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
return (QLA_FUNCTION_FAILED);
}
+ mrk24 = (struct mrk_entry_24xx *)mrk;
+
mrk->entry_type = MARKER_TYPE;
mrk->modifier = type;
if (type != MK_SYNC_ALL) {
if (IS_FWI2_CAPABLE(ha)) {
- mrk24 = (struct mrk_entry_24xx *) mrk;
mrk24->nport_handle = cpu_to_le16(loop_id);
int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
mrk24->vp_index = vha->vp_idx;
- mrk24->handle = make_handle(req->id, mrk24->handle);
} else {
SET_TARGET_ID(ha, mrk->target, loop_id);
mrk->lun = cpu_to_le16((uint16_t)lun);
}
}
+
+ if (IS_FWI2_CAPABLE(ha))
+ mrk24->handle = QLA_SKIP_HANDLE;
+
wmb();
qla2x00_start_iocbs(vha, req);
@@ -2541,7 +2545,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
scsi_qla_host_t *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct srb_iocb *iocb = &sp->u.iocb_cmd;
- struct req_que *req = vha->req;
+ struct req_que *req = sp->qpair->req;
flags = iocb->u.tmf.flags;
lun = iocb->u.tmf.lun;
@@ -2557,7 +2561,8 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
tsk->port_id[2] = fcport->d_id.b.domain;
tsk->vp_index = fcport->vha->vp_idx;
- if (flags == TCF_LUN_RESET) {
+ if (flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET|
+ TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
int_to_scsilun(lun, &tsk->lun);
host_to_fcp_swap((uint8_t *)&tsk->lun,
sizeof(tsk->lun));
@@ -3852,9 +3857,9 @@ static int qla_get_iocbs_resource(struct srb *sp)
case SRB_NACK_LOGO:
case SRB_LOGOUT_CMD:
case SRB_CTRL_VP:
- push_it_through = true;
- fallthrough;
+ case SRB_MARKER:
default:
+ push_it_through = true;
get_exch = false;
}
@@ -3870,6 +3875,19 @@ static int qla_get_iocbs_resource(struct srb *sp)
return qla_get_fw_resources(sp->qpair, &sp->iores);
}
+static void
+qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
+{
+ mrk->entry_type = MARKER_TYPE;
+ mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier;
+ if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) {
+ mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id);
+ int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun);
+ host_to_fcp_swap(mrk->lun, sizeof(mrk->lun));
+ mrk->vp_index = sp->u.iocb_cmd.u.tmf.vp_index;
+ }
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -3973,6 +3991,9 @@ qla2x00_start_sp(srb_t *sp)
case SRB_SA_REPLACE:
qla24xx_sa_replace_iocb(sp, pkt);
break;
+ case SRB_MARKER:
+ qla_marker_iocb(sp, pkt);
+ break;
default:
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 71feda2cdb63..a07c010b0843 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1862,9 +1862,9 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
}
}
-srb_t *
-qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
- struct req_que *req, void *iocb)
+static srb_t *
+qla_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
+ struct req_que *req, void *iocb, u16 *ret_index)
{
struct qla_hw_data *ha = vha->hw;
sts_entry_t *pkt = iocb;
@@ -1899,12 +1899,25 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
return NULL;
}
- req->outstanding_cmds[index] = NULL;
-
+ *ret_index = index;
qla_put_fw_resources(sp->qpair, &sp->iores);
return sp;
}
+srb_t *
+qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
+ struct req_que *req, void *iocb)
+{
+ uint16_t index;
+ srb_t *sp;
+
+ sp = qla_get_sp_from_handle(vha, func, req, iocb, &index);
+ if (sp)
+ req->outstanding_cmds[index] = NULL;
+
+ return sp;
+}
+
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
struct mbx_entry *mbx)
@@ -3237,13 +3250,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
- req->outstanding_cmds[handle] = NULL;
cp = GET_CMD_SP(sp);
if (cp == NULL) {
ql_dbg(ql_dbg_io, vha, 0x3018,
"Command already returned (0x%x/%p).\n",
sts->handle, sp);
+ req->outstanding_cmds[handle] = NULL;
return;
}
@@ -3514,6 +3527,9 @@ out:
if (rsp->status_srb == NULL)
sp->done(sp, res);
+
+	/* for IOs, clearing outstanding_cmds[handle] means scsi_done was called */
+ req->outstanding_cmds[handle] = NULL;
}
/**
@@ -3590,6 +3606,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
uint16_t que = MSW(pkt->handle);
struct req_que *req = NULL;
int res = DID_ERROR << 16;
+ u16 index;
ql_dbg(ql_dbg_async, vha, 0x502a,
"iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
@@ -3608,7 +3625,6 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
switch (pkt->entry_type) {
case NOTIFY_ACK_TYPE:
- case STATUS_TYPE:
case STATUS_CONT_TYPE:
case LOGINOUT_PORT_IOCB_TYPE:
case CT_IOCB_TYPE:
@@ -3628,6 +3644,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
case CTIO_TYPE7:
case CTIO_CRC2:
return 1;
+ case STATUS_TYPE:
+ sp = qla_get_sp_from_handle(vha, func, req, pkt, &index);
+ if (sp) {
+ sp->done(sp, res);
+ req->outstanding_cmds[index] = NULL;
+ return 0;
+ }
+ break;
}
fatal:
ql_log(ql_log_warn, vha, 0x5030,
@@ -3750,6 +3774,28 @@ static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
return rc;
}
+static void qla_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct mrk_entry_24xx *pkt)
+{
+ const char func[] = "MRK-IOCB";
+ srb_t *sp;
+ int res = QLA_SUCCESS;
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+
+ if (pkt->entry_status) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8025, "marker failure.\n");
+ res = QLA_COMMAND_ERROR;
+ }
+ sp->u.iocb_cmd.u.tmf.data = res;
+ sp->done(sp, res);
+}
+
/**
* qla24xx_process_response_queue() - Process response queue entries.
* @vha: SCSI driver HA context
@@ -3863,9 +3909,7 @@ process_err:
(struct nack_to_isp *)pkt);
break;
case MARKER_TYPE:
- /* Do nothing in this case, this check is to prevent it
- * from falling into default case
- */
+ qla_marker_iocb_entry(vha, rsp->req, (struct mrk_entry_24xx *)pkt);
break;
case ABORT_IOCB_TYPE:
qla24xx_abort_iocb_entry(vha, rsp->req,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2fa695bf38b7..bc89d3da8fd0 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1079,43 +1079,6 @@ qc24_fail_command:
}
/*
- * qla2x00_eh_wait_on_command
- * Waits for the command to be returned by the Firmware for some
- * max time.
- *
- * Input:
- * cmd = Scsi Command to wait on.
- *
- * Return:
- * Completed in time : QLA_SUCCESS
- * Did not complete in time : QLA_FUNCTION_FAILED
- */
-static int
-qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
-{
-#define ABORT_POLLING_PERIOD 1000
-#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD))
- unsigned long wait_iter = ABORT_WAIT_ITER;
- scsi_qla_host_t *vha = shost_priv(cmd->device->host);
- struct qla_hw_data *ha = vha->hw;
- srb_t *sp = scsi_cmd_priv(cmd);
- int ret = QLA_SUCCESS;
-
- if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
- ql_dbg(ql_dbg_taskm, vha, 0x8005,
- "Return:eh_wait.\n");
- return ret;
- }
-
- while (sp->type && wait_iter--)
- msleep(ABORT_POLLING_PERIOD);
- if (sp->type)
- ret = QLA_FUNCTION_FAILED;
-
- return ret;
-}
-
-/*
* qla2x00_wait_for_hba_online
* Wait till the HBA is online after going through
* <= MAX_RETRIES_OF_ISP_ABORT or
@@ -1365,6 +1328,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
return ret;
}
+#define ABORT_POLLING_PERIOD 1000
+#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD))
+
/*
* Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
*/
@@ -1378,41 +1344,73 @@ __qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t,
struct req_que *req = qpair->req;
srb_t *sp;
struct scsi_cmnd *cmd;
+ unsigned long wait_iter = ABORT_WAIT_ITER;
+ bool found;
+ struct qla_hw_data *ha = vha->hw;
status = QLA_SUCCESS;
- spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- for (cnt = 1; status == QLA_SUCCESS &&
- cnt < req->num_outstanding_cmds; cnt++) {
- sp = req->outstanding_cmds[cnt];
- if (!sp)
- continue;
- if (sp->type != SRB_SCSI_CMD)
- continue;
- if (vha->vp_idx != sp->vha->vp_idx)
- continue;
- match = 0;
- cmd = GET_CMD_SP(sp);
- switch (type) {
- case WAIT_HOST:
- match = 1;
- break;
- case WAIT_TARGET:
- match = cmd->device->id == t;
- break;
- case WAIT_LUN:
- match = (cmd->device->id == t &&
- cmd->device->lun == l);
- break;
- }
- if (!match)
- continue;
+ while (wait_iter--) {
+ found = false;
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- status = qla2x00_eh_wait_on_command(cmd);
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (!sp)
+ continue;
+ if (sp->type != SRB_SCSI_CMD)
+ continue;
+ if (vha->vp_idx != sp->vha->vp_idx)
+ continue;
+ match = 0;
+ cmd = GET_CMD_SP(sp);
+ switch (type) {
+ case WAIT_HOST:
+ match = 1;
+ break;
+ case WAIT_TARGET:
+ if (sp->fcport)
+ match = sp->fcport->d_id.b24 == t;
+ else
+ match = 0;
+ break;
+ case WAIT_LUN:
+ if (sp->fcport)
+ match = (sp->fcport->d_id.b24 == t &&
+ cmd->device->lun == l);
+ else
+ match = 0;
+ break;
+ }
+ if (!match)
+ continue;
+
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ if (unlikely(pci_channel_offline(ha->pdev)) ||
+ ha->flags.eeh_busy) {
+ ql_dbg(ql_dbg_taskm, vha, 0x8005,
+ "Return:eh_wait.\n");
+ return status;
+ }
+
+ /*
+			 * SRB_SCSI_CMD is still in the outstanding_cmds array.
+			 * It means scsi_done has not been called. Wait for it
+			 * to clear from outstanding_cmds.
+ */
+ msleep(ABORT_POLLING_PERIOD);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ found = true;
+ }
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ if (!found)
+ break;
}
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ if (!wait_iter && found)
+ status = QLA_FUNCTION_FAILED;
return status;
}
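
With the defines above, the rewritten wait is strictly bounded: each scan of
outstanding_cmds sleeps ABORT_POLLING_PERIOD (1000 ms) once per command still
pending, and at most ABORT_WAIT_ITER = (2 * 1000) / 1000 = 2 scans run before
QLA_FUNCTION_FAILED is returned, replacing the removed per-command
qla2x00_eh_wait_on_command() helper.
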
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 42d69d89834f..4d6f06fb156b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.08.200-k"
+#define QLA2XXX_VERSION "10.02.08.300-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 8
-#define QLA_DRIVER_BETA_VER 200
+#define QLA_DRIVER_BETA_VER 300
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index 973d240649ab..789460b0a342 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -1,7 +1,7 @@
#
# Kernel configuration file for the SMARTPQI
#
-# Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+# Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
# Copyright (c) 2017-2018 Microsemi Corporation
# Copyright (c) 2016 Microsemi Corporation
# Copyright (c) 2016 PMC-Sierra, Inc.
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 228838eb3686..f960b5095d09 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -1108,6 +1108,7 @@ struct pqi_scsi_dev {
u8 volume_offline : 1;
u8 rescan : 1;
u8 ignore_device : 1;
+ u8 erase_in_progress : 1;
bool aio_enabled; /* only valid for physical disks */
bool in_remove;
bool device_offline;
@@ -1147,7 +1148,7 @@ struct pqi_scsi_dev {
struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN];
atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE];
- atomic_t raid_bypass_cnt;
+ unsigned int raid_bypass_cnt;
};
/* VPD inquiry pages */
@@ -1357,6 +1358,7 @@ struct pqi_ctrl_info {
u32 max_write_raid_5_6;
u32 max_write_raid_1_10_2drive;
u32 max_write_raid_1_10_3drive;
+ int numa_node;
struct list_head scsi_device_list;
spinlock_t scsi_device_list_lock;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 03de97cd72c2..3669affd114b 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "2.1.20-035"
+#define DRIVER_VERSION "2.1.22-040"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 1
-#define DRIVER_RELEASE 20
-#define DRIVER_REVISION 35
+#define DRIVER_RELEASE 22
+#define DRIVER_REVISION 40
#define DRIVER_NAME "Microchip SmartPQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -519,6 +519,36 @@ static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
writeb(status, ctrl_info->soft_reset_status);
}
+static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
+{
+ bool io_high_prio;
+ int priority_class;
+
+ io_high_prio = false;
+
+ if (device->ncq_prio_enable) {
+ priority_class =
+ IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
+ if (priority_class == IOPRIO_CLASS_RT) {
+ /* Set NCQ priority for read/write commands. */
+ switch (scmd->cmnd[0]) {
+ case WRITE_16:
+ case READ_16:
+ case WRITE_12:
+ case READ_12:
+ case WRITE_10:
+ case READ_10:
+ case WRITE_6:
+ case READ_6:
+ io_high_prio = true;
+ break;
+ }
+ }
+ }
+
+ return io_high_prio;
+}
+
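
pqi_is_io_high_priority() promotes a command only when the submitting context
is in the IOPRIO_CLASS_RT scheduling class (and the device's
sas_ncq_prio_enable attribute is set), so the path is exercised from
userspace through I/O priorities. A hypothetical userspace sketch, equivalent
to running the workload under ionice -c 1:

#include <sys/syscall.h>
#include <unistd.h>

#define IOPRIO_CLASS_RT		1
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_WHO_PROCESS	1
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))

int main(void)
{
	/* tag this process's I/O as realtime, level 0 (highest) */
	return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
		       IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0));
}
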
static int pqi_map_single(struct pci_dev *pci_dev,
struct pqi_sg_descriptor *sg_descriptor, void *buffer,
size_t buffer_length, enum dma_data_direction data_direction)
@@ -578,10 +608,6 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
cdb = request->cdb;
switch (cmd) {
- case TEST_UNIT_READY:
- request->data_direction = SOP_READ_FLAG;
- cdb[0] = TEST_UNIT_READY;
- break;
case INQUIRY:
request->data_direction = SOP_READ_FLAG;
cdb[0] = INQUIRY;
@@ -708,7 +734,8 @@ static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *
}
}
- pqi_reinit_io_request(io_request);
+ if (io_request)
+ pqi_reinit_io_request(io_request);
return io_request;
}
@@ -1588,6 +1615,7 @@ no_buffer:
#define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01
#define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10
+#define PQI_DEVICE_ERASE_IN_PROGRESS 0x10
static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device,
@@ -1636,6 +1664,8 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
PQI_DEVICE_NCQ_PRIO_SUPPORTED);
+ device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS);
+
return 0;
}
@@ -1681,7 +1711,7 @@ out:
/*
* Prevent adding drive to OS for some corner cases such as a drive
- * undergoing a sanitize operation. Some OSes will continue to poll
+ * undergoing a sanitize (erase) operation. Some OSes will continue to poll
* the drive until the sanitize completes, which can take hours,
* resulting in long bootup delays. Commands such as TUR, READ_CAP
* are allowed, but READ/WRITE cause check condition. So the OS
@@ -1689,73 +1719,9 @@ out:
* Note: devices that have completed sanitize must be re-enabled
* using the management utility.
*/
-static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device)
+static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
{
- u8 scsi_status;
- int rc;
- enum dma_data_direction dir;
- char *buffer;
- int buffer_length = 64;
- size_t sense_data_length;
- struct scsi_sense_hdr sshdr;
- struct pqi_raid_path_request request;
- struct pqi_raid_error_info error_info;
- bool offline = false; /* Assume keep online */
-
- /* Do not check controllers. */
- if (pqi_is_hba_lunid(device->scsi3addr))
- return false;
-
- /* Do not check LVs. */
- if (pqi_is_logical_device(device))
- return false;
-
- buffer = kmalloc(buffer_length, GFP_KERNEL);
- if (!buffer)
- return false; /* Assume not offline */
-
- /* Check for SANITIZE in progress using TUR */
- rc = pqi_build_raid_path_request(ctrl_info, &request,
- TEST_UNIT_READY, RAID_CTLR_LUNID, buffer,
- buffer_length, 0, &dir);
- if (rc)
- goto out; /* Assume not offline */
-
- memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number));
-
- rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info);
-
- if (rc)
- goto out; /* Assume not offline */
-
- scsi_status = error_info.status;
- sense_data_length = get_unaligned_le16(&error_info.sense_data_length);
- if (sense_data_length == 0)
- sense_data_length =
- get_unaligned_le16(&error_info.response_data_length);
- if (sense_data_length) {
- if (sense_data_length > sizeof(error_info.data))
- sense_data_length = sizeof(error_info.data);
-
- /*
- * Check for sanitize in progress: asc:0x04, ascq: 0x1b
- */
- if (scsi_status == SAM_STAT_CHECK_CONDITION &&
- scsi_normalize_sense(error_info.data,
- sense_data_length, &sshdr) &&
- sshdr.sense_key == NOT_READY &&
- sshdr.asc == 0x04 &&
- sshdr.ascq == 0x1b) {
- device->device_offline = true;
- offline = true;
- goto out; /* Keep device offline */
- }
- }
-
-out:
- kfree(buffer);
- return offline;
+ return device->erase_in_progress;
}
static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
@@ -2499,10 +2465,6 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
if (!pqi_is_supported_device(device))
continue;
- /* Do not present disks that the OS cannot fully probe */
- if (pqi_keep_device_offline(ctrl_info, device))
- continue;
-
/* Gather information about the device. */
rc = pqi_get_device_info(ctrl_info, device, id_phys);
if (rc == -ENOMEM) {
@@ -2525,6 +2487,10 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
continue;
}
+ /* Do not present disks that the OS cannot fully probe. */
+ if (pqi_keep_device_offline(device))
+ continue;
+
pqi_assign_bus_target_lun(device);
if (device->is_physical_device) {
@@ -5504,15 +5470,19 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request,
pqi_scsi_done(scmd);
}
-static int pqi_raid_submit_scsi_cmd_with_io_request(
- struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
+static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
- struct pqi_queue_group *queue_group)
+ struct pqi_queue_group *queue_group, bool io_high_prio)
{
int rc;
size_t cdb_length;
+ struct pqi_io_request *io_request;
struct pqi_raid_path_request *request;
+ io_request = pqi_alloc_io_request(ctrl_info, scmd);
+ if (!io_request)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
io_request->io_complete_callback = pqi_raid_io_complete;
io_request->scmd = scmd;
@@ -5522,6 +5492,7 @@ static int pqi_raid_submit_scsi_cmd_with_io_request(
request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+ request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number));
@@ -5587,14 +5558,11 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group)
{
- struct pqi_io_request *io_request;
+ bool io_high_prio;
- io_request = pqi_alloc_io_request(ctrl_info, scmd);
- if (!io_request)
- return SCSI_MLQUEUE_HOST_BUSY;
+ io_high_prio = pqi_is_io_high_priority(device, scmd);
- return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
- device, scmd, queue_group);
+ return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio);
}
static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
@@ -5639,44 +5607,13 @@ static void pqi_aio_io_complete(struct pqi_io_request *io_request,
pqi_scsi_done(scmd);
}
-static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
-{
- bool io_high_prio;
- int priority_class;
-
- io_high_prio = false;
-
- if (device->ncq_prio_enable) {
- priority_class =
- IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
- if (priority_class == IOPRIO_CLASS_RT) {
- /* Set NCQ priority for read/write commands. */
- switch (scmd->cmnd[0]) {
- case WRITE_16:
- case READ_16:
- case WRITE_12:
- case READ_12:
- case WRITE_10:
- case READ_10:
- case WRITE_6:
- case READ_6:
- io_high_prio = true;
- break;
- }
- }
- }
-
- return io_high_prio;
-}
-
static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group)
{
bool io_high_prio;
- io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd);
+ io_high_prio = pqi_is_io_high_priority(device, scmd);
return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
scmd->cmnd, scmd->cmd_len, queue_group, NULL,
@@ -5694,10 +5631,10 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct pqi_aio_path_request *request;
struct pqi_scsi_dev *device;
- device = scmd->device->hostdata;
io_request = pqi_alloc_io_request(ctrl_info, scmd);
if (!io_request)
return SCSI_MLQUEUE_HOST_BUSY;
+
io_request->io_complete_callback = pqi_aio_io_complete;
io_request->scmd = scmd;
io_request->raid_bypass = raid_bypass;
@@ -5712,6 +5649,7 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
request->command_priority = io_high_prio;
put_unaligned_le16(io_request->index, &request->request_id);
request->error_index = request->request_id;
+ device = scmd->device->hostdata;
if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported)
put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number);
if (cdb_length > sizeof(request->cdb))
@@ -6052,7 +5990,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
raid_bypassed = true;
- atomic_inc(&device->raid_bypass_cnt);
+ device->raid_bypass_cnt++;
}
}
if (!raid_bypassed)
@@ -7288,7 +7226,7 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
struct scsi_device *sdev;
struct pqi_scsi_dev *device;
unsigned long flags;
- int raid_bypass_cnt;
+ unsigned int raid_bypass_cnt;
sdev = to_scsi_device(dev);
ctrl_info = shost_to_hba(sdev->host);
@@ -7304,7 +7242,7 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
return -ENODEV;
}
- raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
+ raid_bypass_cnt = device->raid_bypass_cnt;
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -7366,8 +7304,7 @@ static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
return -ENODEV;
}
- if (!device->ncq_prio_support ||
- !device->is_physical_device) {
+ if (!device->ncq_prio_support) {
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -EINVAL;
}
@@ -7379,6 +7316,18 @@ static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
return strlen(buf);
}
+static ssize_t pqi_numa_node_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct scsi_device *sdev;
+ struct pqi_ctrl_info *ctrl_info;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node);
+}
+
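
The new numa_node attribute reports whatever the probe path stored in
ctrl_info->numa_node. A rough sketch of how a PCI probe typically derives
that value (hypothetical helper; the driver's exact fallback may differ):

#include <linux/pci.h>
#include <linux/topology.h>

static int demo_ctrl_numa_node(struct pci_dev *pdev)
{
	int node = dev_to_node(&pdev->dev);

	/* firmware may not describe the device's proximity domain */
	if (node == NUMA_NO_NODE)
		node = cpu_to_node(0);

	return node;
}
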
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
@@ -7388,6 +7337,7 @@ static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
+static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);
static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_lunid.attr,
@@ -7398,6 +7348,7 @@ static struct attribute *pqi_sdev_attrs[] = {
&dev_attr_raid_level.attr,
&dev_attr_raid_bypass_cnt.attr,
&dev_attr_sas_ncq_prio_enable.attr,
+ &dev_attr_numa_node.attr,
NULL
};
@@ -7716,8 +7667,8 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
features_requested_iomem_addr +
(le16_to_cpu(firmware_features->num_elements) * 2) +
sizeof(__le16);
- writew(PQI_FIRMWARE_FEATURE_MAXIMUM,
- host_max_known_feature_iomem_addr);
+ writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr);
+ writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1);
}
return pqi_config_table_update(ctrl_info,
@@ -8560,7 +8511,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
ctrl_info->iomem_base = ioremap(pci_resource_start(
ctrl_info->pci_dev, 0),
- sizeof(struct pqi_ctrl_registers));
+ pci_resource_len(ctrl_info->pci_dev, 0));
if (!ctrl_info->iomem_base) {
dev_err(&ctrl_info->pci_dev->dev,
"failed to map memory for controller registers\n");
@@ -9018,6 +8969,7 @@ static int pqi_pci_probe(struct pci_dev *pci_dev,
"failed to allocate controller info block\n");
return -ENOMEM;
}
+ ctrl_info->numa_node = node;
ctrl_info->pci_dev = pci_dev;
@@ -9929,6 +9881,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0804)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0805)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x0806)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x5445)
},
{
@@ -9965,6 +9929,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x54da)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x54db)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1cf2, 0x54dc)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1cf2, 0x0b27)
},
{
@@ -10017,6 +9993,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1014, 0x0718)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
0x1e93, 0x1000)
},
{
@@ -10029,6 +10009,50 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1e93, 0x1005)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1001)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1002)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1003)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1004)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1005)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1006)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1007)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1008)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x1009)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x1f51, 0x100a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index 13e8c539010e..a981d0377948 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -92,25 +92,23 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port,
identify = &rphy->identify;
identify->sas_address = pqi_sas_port->sas_address;
+ identify->phy_identifier = pqi_sas_port->device->phy_id;
identify->initiator_port_protocols = SAS_PROTOCOL_ALL;
identify->target_port_protocols = SAS_PROTOCOL_STP;
- if (pqi_sas_port->device) {
- identify->phy_identifier = pqi_sas_port->device->phy_id;
- switch (pqi_sas_port->device->device_type) {
- case SA_DEVICE_TYPE_SAS:
- case SA_DEVICE_TYPE_SES:
- case SA_DEVICE_TYPE_NVME:
- identify->target_port_protocols = SAS_PROTOCOL_SSP;
- break;
- case SA_DEVICE_TYPE_EXPANDER_SMP:
- identify->target_port_protocols = SAS_PROTOCOL_SMP;
- break;
- case SA_DEVICE_TYPE_SATA:
- default:
- break;
- }
+ switch (pqi_sas_port->device->device_type) {
+ case SA_DEVICE_TYPE_SAS:
+ case SA_DEVICE_TYPE_SES:
+ case SA_DEVICE_TYPE_NVME:
+ identify->target_port_protocols = SAS_PROTOCOL_SSP;
+ break;
+ case SA_DEVICE_TYPE_EXPANDER_SMP:
+ identify->target_port_protocols = SAS_PROTOCOL_SMP;
+ break;
+ case SA_DEVICE_TYPE_SATA:
+ default:
+ break;
}
return sas_rphy_add(rphy);
@@ -295,10 +293,12 @@ int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
rc = pqi_sas_port_add_rphy(pqi_sas_port, rphy);
if (rc)
- goto free_sas_port;
+ goto free_sas_rphy;
return 0;
+free_sas_rphy:
+ sas_rphy_free(rphy);
free_sas_port:
pqi_free_sas_port(pqi_sas_port);
device->sas_port = NULL;
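
The error-path fix above frees the rphy before unwinding the port: resources are released in reverse order of acquisition, one label per resource. A generic sketch of that unwind shape, with hypothetical acquire/release helpers standing in for the SAS transport calls:

/* Stacked error unwinding: each label releases exactly what was
 * acquired before the corresponding goto, newest first. All of the
 * acquire_*(), register_*() and release_*() helpers are hypothetical.
 */
static int acquire_port(void);
static int acquire_rphy(void);
static int register_rphy(void);
static void release_port(void);
static void release_rphy(void);

static int add_device(void)
{
	int rc;

	rc = acquire_port();
	if (rc)
		return rc;

	rc = acquire_rphy();
	if (rc)
		goto free_port;

	rc = register_rphy();	/* can fail after both acquisitions */
	if (rc)
		goto free_rphy;

	return 0;

free_rphy:
	release_rphy();
free_port:
	release_port();
	return rc;
}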
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index 5811fb3c22a9..673437c7152b 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 9dcbae96a5c6..0c97626d87d4 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microchip PQI-based storage controllers
- * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/soc/microchip/mpfs-sys-controller.c b/drivers/soc/microchip/mpfs-sys-controller.c
index 216d9f4ea0ce..10b8845bc973 100644
--- a/drivers/soc/microchip/mpfs-sys-controller.c
+++ b/drivers/soc/microchip/mpfs-sys-controller.c
@@ -71,6 +71,12 @@ int mpfs_blocking_transaction(struct mpfs_sys_controller *sys_controller, struct
out:
mutex_unlock(&transaction_lock);
+ if (ret)
+ return ret;
+
+ if (msg->response->resp_status)
+ ret = -EIO;
+
return ret;
}
EXPORT_SYMBOL(mpfs_blocking_transaction);
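
The mpfs_blocking_transaction() change distinguishes two failure layers: a mailbox transport error is propagated unchanged, while a transaction that completed but whose response carries a non-zero resp_status is collapsed to -EIO. The same shape in isolation, with hypothetical names (fw_msg, transport_send):

/* Two-level error propagation: transport errors take precedence;
 * a firmware-reported failure is mapped to -EIO. The fw_msg type
 * and transport_send() helper are illustrative stand-ins.
 */
struct fw_msg {
	int resp_status;	/* 0 on success, firmware-specific otherwise */
};

static int transport_send(struct fw_msg *msg);	/* hypothetical transport */

static int fw_transaction(struct fw_msg *msg)
{
	int ret = transport_send(msg);

	if (ret)
		return ret;		/* transport failed outright */

	if (msg->resp_status)
		ret = -EIO;		/* transport OK, firmware said no */

	return ret;
}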
diff --git a/drivers/soc/rockchip/dtpm.c b/drivers/soc/rockchip/dtpm.c
index 5a23784b5221..b36d4f752c30 100644
--- a/drivers/soc/rockchip/dtpm.c
+++ b/drivers/soc/rockchip/dtpm.c
@@ -12,33 +12,33 @@
#include <linux/platform_device.h>
static struct dtpm_node __initdata rk3399_hierarchy[] = {
- [0]{ .name = "rk3399",
- .type = DTPM_NODE_VIRTUAL },
- [1]{ .name = "package",
- .type = DTPM_NODE_VIRTUAL,
- .parent = &rk3399_hierarchy[0] },
- [2]{ .name = "/cpus/cpu@0",
- .type = DTPM_NODE_DT,
- .parent = &rk3399_hierarchy[1] },
- [3]{ .name = "/cpus/cpu@1",
- .type = DTPM_NODE_DT,
- .parent = &rk3399_hierarchy[1] },
- [4]{ .name = "/cpus/cpu@2",
- .type = DTPM_NODE_DT,
- .parent = &rk3399_hierarchy[1] },
- [5]{ .name = "/cpus/cpu@3",
- .type = DTPM_NODE_DT,
- .parent = &rk3399_hierarchy[1] },
- [6]{ .name = "/cpus/cpu@100",
- .type = DTPM_NODE_DT,
- .parent = &rk3399_hierarchy[1] },
- [7]{ .name = "/cpus/cpu@101",
- .type = DTPM_NODE_DT,
- .parent = &rk3399_hierarchy[1] },
- [8]{ .name = "/gpu@ff9a0000",
- .type = DTPM_NODE_DT,
- .parent = &rk3399_hierarchy[1] },
- [9]{ /* sentinel */ }
+ [0] = { .name = "rk3399",
+ .type = DTPM_NODE_VIRTUAL },
+ [1] = { .name = "package",
+ .type = DTPM_NODE_VIRTUAL,
+ .parent = &rk3399_hierarchy[0] },
+ [2] = { .name = "/cpus/cpu@0",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [3] = { .name = "/cpus/cpu@1",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [4] = { .name = "/cpus/cpu@2",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [5] = { .name = "/cpus/cpu@3",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [6] = { .name = "/cpus/cpu@100",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [7] = { .name = "/cpus/cpu@101",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [8] = { .name = "/gpu@ff9a0000",
+ .type = DTPM_NODE_DT,
+ .parent = &rk3399_hierarchy[1] },
+ [9] = { /* sentinel */ }
};
static struct of_device_id __initdata rockchip_dtpm_match_table[] = {
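
The dtpm hunk only changes initializer spelling: the old [n]{ ... } form leans on an obsolete GCC extension, while standard C99 designated initializers require the =. For reference, the standard form looks like this (illustrative types):

struct node {
	const char *name;
	int parent;
};

/* C99 designated initializers: each element names its index
 * explicitly, and an empty trailing element serves as sentinel.
 */
static const struct node tree[] = {
	[0] = { .name = "root",  .parent = -1 },
	[1] = { .name = "child", .parent = 0 },
	[2] = { /* sentinel */ },
};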
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 84bc022f9e5b..e3de49e671dc 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -43,8 +43,10 @@ struct rockchip_domain_info {
bool active_wakeup;
int pwr_w_mask;
int req_w_mask;
+ int mem_status_mask;
int repair_status_mask;
u32 pwr_offset;
+ u32 mem_offset;
u32 req_offset;
};
@@ -54,6 +56,9 @@ struct rockchip_pmu_info {
u32 req_offset;
u32 idle_offset;
u32 ack_offset;
+ u32 mem_pwr_offset;
+ u32 chain_status_offset;
+ u32 mem_status_offset;
u32 repair_status_offset;
u32 core_pwrcnt_offset;
@@ -119,13 +124,15 @@ struct rockchip_pmu {
.active_wakeup = wakeup, \
}
-#define DOMAIN_M_O_R(_name, p_offset, pwr, status, r_status, r_offset, req, idle, ack, wakeup) \
+#define DOMAIN_M_O_R(_name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, ack, wakeup) \
{ \
.name = _name, \
.pwr_offset = p_offset, \
.pwr_w_mask = (pwr) << 16, \
.pwr_mask = (pwr), \
.status_mask = (status), \
+ .mem_offset = m_offset, \
+ .mem_status_mask = (m_status), \
.repair_status_mask = (r_status), \
.req_offset = r_offset, \
.req_w_mask = (req) << 16, \
@@ -269,8 +276,8 @@ void rockchip_pmu_unblock(void)
}
EXPORT_SYMBOL_GPL(rockchip_pmu_unblock);
-#define DOMAIN_RK3588(name, p_offset, pwr, status, r_status, r_offset, req, idle, wakeup) \
- DOMAIN_M_O_R(name, p_offset, pwr, status, r_status, r_offset, req, idle, idle, wakeup)
+#define DOMAIN_RK3588(name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, wakeup) \
+ DOMAIN_M_O_R(name, p_offset, pwr, status, m_offset, m_status, r_status, r_offset, req, idle, idle, wakeup)
static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
{
@@ -408,17 +415,92 @@ static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
return !(val & pd->info->status_mask);
}
+static bool rockchip_pmu_domain_is_mem_on(struct rockchip_pm_domain *pd)
+{
+ struct rockchip_pmu *pmu = pd->pmu;
+ unsigned int val;
+
+ regmap_read(pmu->regmap,
+ pmu->info->mem_status_offset + pd->info->mem_offset, &val);
+
+ /* 1'b0: power on, 1'b1: power off */
+ return !(val & pd->info->mem_status_mask);
+}
+
+static bool rockchip_pmu_domain_is_chain_on(struct rockchip_pm_domain *pd)
+{
+ struct rockchip_pmu *pmu = pd->pmu;
+ unsigned int val;
+
+ regmap_read(pmu->regmap,
+ pmu->info->chain_status_offset + pd->info->mem_offset, &val);
+
+ /* 1'b1: power on, 1'b0: power off */
+ return val & pd->info->mem_status_mask;
+}
+
+static int rockchip_pmu_domain_mem_reset(struct rockchip_pm_domain *pd)
+{
+ struct rockchip_pmu *pmu = pd->pmu;
+ struct generic_pm_domain *genpd = &pd->genpd;
+ bool is_on;
+ int ret = 0;
+
+ ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_chain_on, pd, is_on,
+ is_on == true, 0, 10000);
+ if (ret) {
+ dev_err(pmu->dev,
+ "failed to get chain status '%s', target_on=1, val=%d\n",
+ genpd->name, is_on);
+ goto error;
+ }
+
+ udelay(20);
+
+ regmap_write(pmu->regmap, pmu->info->mem_pwr_offset + pd->info->pwr_offset,
+ (pd->info->pwr_mask | pd->info->pwr_w_mask));
+ wmb();
+
+ ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_mem_on, pd, is_on,
+ is_on == false, 0, 10000);
+ if (ret) {
+ dev_err(pmu->dev,
+ "failed to get mem status '%s', target_on=0, val=%d\n",
+ genpd->name, is_on);
+ goto error;
+ }
+
+ regmap_write(pmu->regmap, pmu->info->mem_pwr_offset + pd->info->pwr_offset,
+ pd->info->pwr_w_mask);
+ wmb();
+
+ ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_mem_on, pd, is_on,
+ is_on == true, 0, 10000);
+ if (ret) {
+ dev_err(pmu->dev,
+ "failed to get mem status '%s', target_on=1, val=%d\n",
+ genpd->name, is_on);
+ }
+
+error:
+ return ret;
+}
+
static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
bool on)
{
struct rockchip_pmu *pmu = pd->pmu;
struct generic_pm_domain *genpd = &pd->genpd;
u32 pd_pwr_offset = pd->info->pwr_offset;
- bool is_on;
+ bool is_on, is_mem_on = false;
if (pd->info->pwr_mask == 0)
return;
- else if (pd->info->pwr_w_mask)
+
+ if (on && pd->info->mem_status_mask)
+ is_mem_on = rockchip_pmu_domain_is_mem_on(pd);
+
+ if (pd->info->pwr_w_mask)
regmap_write(pmu->regmap, pmu->info->pwr_offset + pd_pwr_offset,
on ? pd->info->pwr_w_mask :
(pd->info->pwr_mask | pd->info->pwr_w_mask));
@@ -428,6 +510,9 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
wmb();
+ if (is_mem_on && rockchip_pmu_domain_mem_reset(pd))
+ return;
+
if (readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
is_on == on, 0, 10000)) {
dev_err(pmu->dev,
@@ -645,7 +730,9 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
pd->genpd.flags = GENPD_FLAG_PM_CLK;
if (pd_info->active_wakeup)
pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
- pm_genpd_init(&pd->genpd, NULL, !rockchip_pmu_domain_is_on(pd));
+ pm_genpd_init(&pd->genpd, NULL,
+ !rockchip_pmu_domain_is_on(pd) ||
+ (pd->info->mem_status_mask && !rockchip_pmu_domain_is_mem_on(pd)));
pmu->genpd_data.domains[id] = &pd->genpd;
return 0;
@@ -1024,35 +1111,35 @@ static const struct rockchip_domain_info rk3568_pm_domains[] = {
};
static const struct rockchip_domain_info rk3588_pm_domains[] = {
- [RK3588_PD_GPU] = DOMAIN_RK3588("gpu", 0x0, BIT(0), 0, BIT(1), 0x0, BIT(0), BIT(0), false),
- [RK3588_PD_NPU] = DOMAIN_RK3588("npu", 0x0, BIT(1), BIT(1), 0, 0x0, 0, 0, false),
- [RK3588_PD_VCODEC] = DOMAIN_RK3588("vcodec", 0x0, BIT(2), BIT(2), 0, 0x0, 0, 0, false),
- [RK3588_PD_NPUTOP] = DOMAIN_RK3588("nputop", 0x0, BIT(3), 0, BIT(2), 0x0, BIT(1), BIT(1), false),
- [RK3588_PD_NPU1] = DOMAIN_RK3588("npu1", 0x0, BIT(4), 0, BIT(3), 0x0, BIT(2), BIT(2), false),
- [RK3588_PD_NPU2] = DOMAIN_RK3588("npu2", 0x0, BIT(5), 0, BIT(4), 0x0, BIT(3), BIT(3), false),
- [RK3588_PD_VENC0] = DOMAIN_RK3588("venc0", 0x0, BIT(6), 0, BIT(5), 0x0, BIT(4), BIT(4), false),
- [RK3588_PD_VENC1] = DOMAIN_RK3588("venc1", 0x0, BIT(7), 0, BIT(6), 0x0, BIT(5), BIT(5), false),
- [RK3588_PD_RKVDEC0] = DOMAIN_RK3588("rkvdec0", 0x0, BIT(8), 0, BIT(7), 0x0, BIT(6), BIT(6), false),
- [RK3588_PD_RKVDEC1] = DOMAIN_RK3588("rkvdec1", 0x0, BIT(9), 0, BIT(8), 0x0, BIT(7), BIT(7), false),
- [RK3588_PD_VDPU] = DOMAIN_RK3588("vdpu", 0x0, BIT(10), 0, BIT(9), 0x0, BIT(8), BIT(8), false),
- [RK3588_PD_RGA30] = DOMAIN_RK3588("rga30", 0x0, BIT(11), 0, BIT(10), 0x0, 0, 0, false),
- [RK3588_PD_AV1] = DOMAIN_RK3588("av1", 0x0, BIT(12), 0, BIT(11), 0x0, BIT(9), BIT(9), false),
- [RK3588_PD_VI] = DOMAIN_RK3588("vi", 0x0, BIT(13), 0, BIT(12), 0x0, BIT(10), BIT(10), false),
- [RK3588_PD_FEC] = DOMAIN_RK3588("fec", 0x0, BIT(14), 0, BIT(13), 0x0, 0, 0, false),
- [RK3588_PD_ISP1] = DOMAIN_RK3588("isp1", 0x0, BIT(15), 0, BIT(14), 0x0, BIT(11), BIT(11), false),
- [RK3588_PD_RGA31] = DOMAIN_RK3588("rga31", 0x4, BIT(0), 0, BIT(15), 0x0, BIT(12), BIT(12), false),
- [RK3588_PD_VOP] = DOMAIN_RK3588("vop", 0x4, BIT(1), 0, BIT(16), 0x0, BIT(13) | BIT(14), BIT(13) | BIT(14), false),
- [RK3588_PD_VO0] = DOMAIN_RK3588("vo0", 0x4, BIT(2), 0, BIT(17), 0x0, BIT(15), BIT(15), false),
- [RK3588_PD_VO1] = DOMAIN_RK3588("vo1", 0x4, BIT(3), 0, BIT(18), 0x4, BIT(0), BIT(16), false),
- [RK3588_PD_AUDIO] = DOMAIN_RK3588("audio", 0x4, BIT(4), 0, BIT(19), 0x4, BIT(1), BIT(17), false),
- [RK3588_PD_PHP] = DOMAIN_RK3588("php", 0x4, BIT(5), 0, BIT(20), 0x4, BIT(5), BIT(21), false),
- [RK3588_PD_GMAC] = DOMAIN_RK3588("gmac", 0x4, BIT(6), 0, BIT(21), 0x0, 0, 0, false),
- [RK3588_PD_PCIE] = DOMAIN_RK3588("pcie", 0x4, BIT(7), 0, BIT(22), 0x0, 0, 0, true),
- [RK3588_PD_NVM] = DOMAIN_RK3588("nvm", 0x4, BIT(8), BIT(24), 0, 0x4, BIT(2), BIT(18), false),
- [RK3588_PD_NVM0] = DOMAIN_RK3588("nvm0", 0x4, BIT(9), 0, BIT(23), 0x0, 0, 0, false),
- [RK3588_PD_SDIO] = DOMAIN_RK3588("sdio", 0x4, BIT(10), 0, BIT(24), 0x4, BIT(3), BIT(19), false),
- [RK3588_PD_USB] = DOMAIN_RK3588("usb", 0x4, BIT(11), 0, BIT(25), 0x4, BIT(4), BIT(20), true),
- [RK3588_PD_SDMMC] = DOMAIN_RK3588("sdmmc", 0x4, BIT(13), 0, BIT(26), 0x0, 0, 0, false),
+ [RK3588_PD_GPU] = DOMAIN_RK3588("gpu", 0x0, BIT(0), 0, 0x0, 0, BIT(1), 0x0, BIT(0), BIT(0), false),
+ [RK3588_PD_NPU] = DOMAIN_RK3588("npu", 0x0, BIT(1), BIT(1), 0x0, 0, 0, 0x0, 0, 0, false),
+ [RK3588_PD_VCODEC] = DOMAIN_RK3588("vcodec", 0x0, BIT(2), BIT(2), 0x0, 0, 0, 0x0, 0, 0, false),
+ [RK3588_PD_NPUTOP] = DOMAIN_RK3588("nputop", 0x0, BIT(3), 0, 0x0, BIT(11), BIT(2), 0x0, BIT(1), BIT(1), false),
+ [RK3588_PD_NPU1] = DOMAIN_RK3588("npu1", 0x0, BIT(4), 0, 0x0, BIT(12), BIT(3), 0x0, BIT(2), BIT(2), false),
+ [RK3588_PD_NPU2] = DOMAIN_RK3588("npu2", 0x0, BIT(5), 0, 0x0, BIT(13), BIT(4), 0x0, BIT(3), BIT(3), false),
+ [RK3588_PD_VENC0] = DOMAIN_RK3588("venc0", 0x0, BIT(6), 0, 0x0, BIT(14), BIT(5), 0x0, BIT(4), BIT(4), false),
+ [RK3588_PD_VENC1] = DOMAIN_RK3588("venc1", 0x0, BIT(7), 0, 0x0, BIT(15), BIT(6), 0x0, BIT(5), BIT(5), false),
+ [RK3588_PD_RKVDEC0] = DOMAIN_RK3588("rkvdec0", 0x0, BIT(8), 0, 0x0, BIT(16), BIT(7), 0x0, BIT(6), BIT(6), false),
+ [RK3588_PD_RKVDEC1] = DOMAIN_RK3588("rkvdec1", 0x0, BIT(9), 0, 0x0, BIT(17), BIT(8), 0x0, BIT(7), BIT(7), false),
+ [RK3588_PD_VDPU] = DOMAIN_RK3588("vdpu", 0x0, BIT(10), 0, 0x0, BIT(18), BIT(9), 0x0, BIT(8), BIT(8), false),
+ [RK3588_PD_RGA30] = DOMAIN_RK3588("rga30", 0x0, BIT(11), 0, 0x0, BIT(19), BIT(10), 0x0, 0, 0, false),
+ [RK3588_PD_AV1] = DOMAIN_RK3588("av1", 0x0, BIT(12), 0, 0x0, BIT(20), BIT(11), 0x0, BIT(9), BIT(9), false),
+ [RK3588_PD_VI] = DOMAIN_RK3588("vi", 0x0, BIT(13), 0, 0x0, BIT(21), BIT(12), 0x0, BIT(10), BIT(10), false),
+ [RK3588_PD_FEC] = DOMAIN_RK3588("fec", 0x0, BIT(14), 0, 0x0, BIT(22), BIT(13), 0x0, 0, 0, false),
+ [RK3588_PD_ISP1] = DOMAIN_RK3588("isp1", 0x0, BIT(15), 0, 0x0, BIT(23), BIT(14), 0x0, BIT(11), BIT(11), false),
+ [RK3588_PD_RGA31] = DOMAIN_RK3588("rga31", 0x4, BIT(0), 0, 0x0, BIT(24), BIT(15), 0x0, BIT(12), BIT(12), false),
+ [RK3588_PD_VOP] = DOMAIN_RK3588("vop", 0x4, BIT(1), 0, 0x0, BIT(25), BIT(16), 0x0, BIT(13) | BIT(14), BIT(13) | BIT(14), false),
+ [RK3588_PD_VO0] = DOMAIN_RK3588("vo0", 0x4, BIT(2), 0, 0x0, BIT(26), BIT(17), 0x0, BIT(15), BIT(15), false),
+ [RK3588_PD_VO1] = DOMAIN_RK3588("vo1", 0x4, BIT(3), 0, 0x0, BIT(27), BIT(18), 0x4, BIT(0), BIT(16), false),
+ [RK3588_PD_AUDIO] = DOMAIN_RK3588("audio", 0x4, BIT(4), 0, 0x0, BIT(28), BIT(19), 0x4, BIT(1), BIT(17), false),
+ [RK3588_PD_PHP] = DOMAIN_RK3588("php", 0x4, BIT(5), 0, 0x0, BIT(29), BIT(20), 0x4, BIT(5), BIT(21), false),
+ [RK3588_PD_GMAC] = DOMAIN_RK3588("gmac", 0x4, BIT(6), 0, 0x0, BIT(30), BIT(21), 0x0, 0, 0, false),
+ [RK3588_PD_PCIE] = DOMAIN_RK3588("pcie", 0x4, BIT(7), 0, 0x0, BIT(31), BIT(22), 0x0, 0, 0, true),
+ [RK3588_PD_NVM] = DOMAIN_RK3588("nvm", 0x4, BIT(8), BIT(24), 0x4, 0, 0, 0x4, BIT(2), BIT(18), false),
+ [RK3588_PD_NVM0] = DOMAIN_RK3588("nvm0", 0x4, BIT(9), 0, 0x4, BIT(1), BIT(23), 0x0, 0, 0, false),
+ [RK3588_PD_SDIO] = DOMAIN_RK3588("sdio", 0x4, BIT(10), 0, 0x4, BIT(2), BIT(24), 0x4, BIT(3), BIT(19), false),
+ [RK3588_PD_USB] = DOMAIN_RK3588("usb", 0x4, BIT(11), 0, 0x4, BIT(3), BIT(25), 0x4, BIT(4), BIT(20), true),
+ [RK3588_PD_SDMMC] = DOMAIN_RK3588("sdmmc", 0x4, BIT(13), 0, 0x4, BIT(5), BIT(26), 0x0, 0, 0, false),
};
static const struct rockchip_pmu_info px30_pmu = {
@@ -1207,6 +1294,9 @@ static const struct rockchip_pmu_info rk3588_pmu = {
.req_offset = 0x10c,
.idle_offset = 0x120,
.ack_offset = 0x118,
+ .mem_pwr_offset = 0x1a0,
+ .chain_status_offset = 0x1f0,
+ .mem_status_offset = 0x1f8,
.repair_status_offset = 0x290,
.num_domains = ARRAY_SIZE(rk3588_pm_domains),
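
rockchip_pmu_domain_mem_reset() drives each handshake with readx_poll_timeout_atomic(), which re-invokes a reader function until a condition holds or a microsecond budget expires. One such wait, stripped down to a sketch (the status reader is hypothetical; the 0/10000 parameters match the driver's):

#include <linux/iopoll.h>

/* Poll a hypothetical status reader with no inter-read delay and a
 * 10 ms (10000 us) budget. Returns 0 once is_on becomes true,
 * -ETIMEDOUT otherwise.
 */
static bool my_domain_is_on(void *pd);

static int wait_domain_on(void *pd)
{
	bool is_on;

	return readx_poll_timeout_atomic(my_domain_is_on, pd, is_on,
					 is_on, 0, 10000);
}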
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index 78a8a7545d1e..641dcc958911 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -218,7 +218,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
} else if (ret != -EACCES && ret != -ENODEV) {
dev_err(&pdev->dev, "Failed to Register with Xilinx Event manager %d\n", ret);
return ret;
- } else if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) {
+ } else if (of_property_present(pdev->dev.of_node, "mboxes")) {
zynqmp_pm_init_suspend_work =
devm_kzalloc(&pdev->dev,
sizeof(struct zynqmp_pm_work_struct),
@@ -240,7 +240,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to request rx channel\n");
return PTR_ERR(rx_chan);
}
- } else if (of_find_property(pdev->dev.of_node, "interrupts", NULL)) {
+ } else if (of_property_present(pdev->dev.of_node, "interrupts")) {
irq = platform_get_irq(pdev, 0);
if (irq <= 0)
return -ENXIO;
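
of_property_present() is a bool-returning wrapper that replaces the of_find_property(np, name, NULL) idiom when only the property's existence matters; the probe logic reads the same but without the vestigial NULL argument. A minimal sketch:

#include <linux/of.h>

/* Existence check only: equivalent to
 * of_find_property(np, "mboxes", NULL) != NULL, but self-describing.
 */
static bool uses_mailbox(const struct device_node *np)
{
	return of_property_present(np, "mboxes");
}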
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 1ea6a64f8c4a..338f4f0b5d0c 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -1588,7 +1588,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
unsigned long port;
bool slave_notify;
u8 sdca_cascade = 0;
- u8 buf, buf2[2], _buf, _buf2[2];
+ u8 buf, buf2[2];
bool parity_check;
bool parity_quirk;
@@ -1745,9 +1745,9 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
"SDW_SCP_INT1 recheck read failed:%d\n", ret);
goto io_err;
}
- _buf = ret;
+ buf = ret;
- ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, _buf2);
+ ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
if (ret < 0) {
dev_err(&slave->dev,
"SDW_SCP_INT2/3 recheck read failed:%d\n", ret);
@@ -1765,12 +1765,8 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
}
/*
- * Make sure no interrupts are pending, but filter to limit loop
- * to interrupts identified in the first status read
+ * Make sure no interrupts are pending
*/
- buf &= _buf;
- buf2[0] &= _buf2[0];
- buf2[1] &= _buf2[1];
stat = buf || buf2[0] || buf2[1] || sdca_cascade;
/*
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index c296e0bf897b..aad5942e5980 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -31,6 +31,7 @@
#define SWRM_VERSION_1_3_0 0x01030000
#define SWRM_VERSION_1_5_1 0x01050001
#define SWRM_VERSION_1_7_0 0x01070000
+#define SWRM_VERSION_2_0_0 0x02000000
#define SWRM_COMP_HW_VERSION 0x00
#define SWRM_COMP_CFG_ADDR 0x04
#define SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK BIT(1)
@@ -41,7 +42,8 @@
#define SWRM_COMP_PARAMS_DOUT_PORTS_MASK GENMASK(4, 0)
#define SWRM_COMP_PARAMS_DIN_PORTS_MASK GENMASK(9, 5)
#define SWRM_COMP_MASTER_ID 0x104
-#define SWRM_INTERRUPT_STATUS 0x200
+#define SWRM_V1_3_INTERRUPT_STATUS 0x200
+#define SWRM_V2_0_INTERRUPT_STATUS 0x5000
#define SWRM_INTERRUPT_STATUS_RMSK GENMASK(16, 0)
#define SWRM_INTERRUPT_STATUS_SLAVE_PEND_IRQ BIT(0)
#define SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED BIT(1)
@@ -54,24 +56,32 @@
#define SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION BIT(8)
#define SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH BIT(9)
#define SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED BIT(10)
-#define SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED_V2 BIT(13)
-#define SWRM_INTERRUPT_STATUS_CLK_STOP_FINISHED_V2 BIT(14)
-#define SWRM_INTERRUPT_STATUS_EXT_CLK_STOP_WAKEUP BIT(16)
+#define SWRM_INTERRUPT_STATUS_AUTO_ENUM_FAILED BIT(11)
+#define SWRM_INTERRUPT_STATUS_AUTO_ENUM_TABLE_IS_FULL BIT(12)
+#define SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED_V2 BIT(13)
+#define SWRM_INTERRUPT_STATUS_CLK_STOP_FINISHED_V2 BIT(14)
+#define SWRM_INTERRUPT_STATUS_EXT_CLK_STOP_WAKEUP BIT(16)
#define SWRM_INTERRUPT_MAX 17
-#define SWRM_INTERRUPT_MASK_ADDR 0x204
-#define SWRM_INTERRUPT_CLEAR 0x208
-#define SWRM_INTERRUPT_CPU_EN 0x210
-#define SWRM_CMD_FIFO_WR_CMD 0x300
-#define SWRM_CMD_FIFO_RD_CMD 0x304
+#define SWRM_V1_3_INTERRUPT_MASK_ADDR 0x204
+#define SWRM_V1_3_INTERRUPT_CLEAR 0x208
+#define SWRM_V2_0_INTERRUPT_CLEAR 0x5008
+#define SWRM_V1_3_INTERRUPT_CPU_EN 0x210
+#define SWRM_V2_0_INTERRUPT_CPU_EN 0x5004
+#define SWRM_V1_3_CMD_FIFO_WR_CMD 0x300
+#define SWRM_V2_0_CMD_FIFO_WR_CMD 0x5020
+#define SWRM_V1_3_CMD_FIFO_RD_CMD 0x304
+#define SWRM_V2_0_CMD_FIFO_RD_CMD 0x5024
#define SWRM_CMD_FIFO_CMD 0x308
#define SWRM_CMD_FIFO_FLUSH 0x1
-#define SWRM_CMD_FIFO_STATUS 0x30C
+#define SWRM_V1_3_CMD_FIFO_STATUS 0x30C
+#define SWRM_V2_0_CMD_FIFO_STATUS 0x5050
#define SWRM_RD_CMD_FIFO_CNT_MASK GENMASK(20, 16)
#define SWRM_WR_CMD_FIFO_CNT_MASK GENMASK(12, 8)
#define SWRM_CMD_FIFO_CFG_ADDR 0x314
#define SWRM_CONTINUE_EXEC_ON_CMD_IGNORE BIT(31)
#define SWRM_RD_WR_CMD_RETRIES 0x7
-#define SWRM_CMD_FIFO_RD_FIFO_ADDR 0x318
+#define SWRM_V1_3_CMD_FIFO_RD_FIFO_ADDR 0x318
+#define SWRM_V2_0_CMD_FIFO_RD_FIFO_ADDR 0x5040
#define SWRM_RD_FIFO_CMD_ID_MASK GENMASK(11, 8)
#define SWRM_ENUMERATOR_CFG_ADDR 0x500
#define SWRM_ENUMERATOR_SLAVE_DEV_ID_1(m) (0x530 + 0x8 * (m))
@@ -95,8 +105,14 @@
#define SWRM_DP_BLOCK_CTRL2_BANK(n, m) (0x1130 + 0x100 * (n - 1) + 0x40 * m)
#define SWRM_DP_PORT_HCTRL_BANK(n, m) (0x1134 + 0x100 * (n - 1) + 0x40 * m)
#define SWRM_DP_BLOCK_CTRL3_BANK(n, m) (0x1138 + 0x100 * (n - 1) + 0x40 * m)
+#define SWRM_DP_SAMPLECTRL2_BANK(n, m) (0x113C + 0x100 * (n - 1) + 0x40 * m)
#define SWRM_DIN_DPn_PCM_PORT_CTRL(n) (0x1054 + 0x100 * (n - 1))
-#define SWR_MSTR_MAX_REG_ADDR (0x1740)
+#define SWR_V1_3_MSTR_MAX_REG_ADDR 0x1740
+#define SWR_V2_0_MSTR_MAX_REG_ADDR 0x50ac
+
+#define SWRM_V2_0_CLK_CTRL 0x5060
+#define SWRM_V2_0_CLK_CTRL_CLK_START BIT(0)
+#define SWRM_V2_0_LINK_STATUS 0x5064
#define SWRM_DP_PORT_CTRL_EN_CHAN_SHFT 0x18
#define SWRM_DP_PORT_CTRL_OFFSET2_SHFT 0x10
@@ -109,20 +125,20 @@
#define SWRM_REG_VAL_PACK(data, dev, id, reg) \
((reg) | ((id) << 16) | ((dev) << 20) | ((data) << 24))
-#define MAX_FREQ_NUM 1
-#define TIMEOUT_MS 100
-#define QCOM_SWRM_MAX_RD_LEN 0x1
-#define QCOM_SDW_MAX_PORTS 14
-#define DEFAULT_CLK_FREQ 9600000
-#define SWRM_MAX_DAIS 0xF
-#define SWR_INVALID_PARAM 0xFF
-#define SWR_HSTOP_MAX_VAL 0xF
-#define SWR_HSTART_MIN_VAL 0x0
-#define SWR_BROADCAST_CMD_ID 0x0F
-#define SWR_MAX_CMD_ID 14
-#define MAX_FIFO_RD_RETRY 3
-#define SWR_OVERFLOW_RETRY_COUNT 30
-#define SWRM_LINK_STATUS_RETRY_CNT 100
+#define MAX_FREQ_NUM 1
+#define TIMEOUT_MS 100
+#define QCOM_SWRM_MAX_RD_LEN 0x1
+#define QCOM_SDW_MAX_PORTS 14
+#define DEFAULT_CLK_FREQ 9600000
+#define SWRM_MAX_DAIS 0xF
+#define SWR_INVALID_PARAM 0xFF
+#define SWR_HSTOP_MAX_VAL 0xF
+#define SWR_HSTART_MIN_VAL 0x0
+#define SWR_BROADCAST_CMD_ID 0x0F
+#define SWR_MAX_CMD_ID 14
+#define MAX_FIFO_RD_RETRY 3
+#define SWR_OVERFLOW_RETRY_COUNT 30
+#define SWRM_LINK_STATUS_RETRY_CNT 100
enum {
MASTER_ID_WSA = 1,
@@ -131,7 +147,7 @@ enum {
};
struct qcom_swrm_port_config {
- u8 si;
+ u16 si;
u8 off1;
u8 off2;
u8 bp_mode;
@@ -142,10 +158,28 @@ struct qcom_swrm_port_config {
u8 lane_control;
};
+/*
+ * Internal IDs for different register layouts. Only a few registers differ
+ * per variant, so the list of IDs below does not include every register.
+ */
+enum {
+ SWRM_REG_FRAME_GEN_ENABLED,
+ SWRM_REG_INTERRUPT_STATUS,
+ SWRM_REG_INTERRUPT_MASK_ADDR,
+ SWRM_REG_INTERRUPT_CLEAR,
+ SWRM_REG_INTERRUPT_CPU_EN,
+ SWRM_REG_CMD_FIFO_WR_CMD,
+ SWRM_REG_CMD_FIFO_RD_CMD,
+ SWRM_REG_CMD_FIFO_STATUS,
+ SWRM_REG_CMD_FIFO_RD_FIFO_ADDR,
+};
+
struct qcom_swrm_ctrl {
struct sdw_bus bus;
struct device *dev;
struct regmap *regmap;
+ u32 max_reg;
+ const unsigned int *reg_layout;
void __iomem *mmio;
struct reset_control *audio_cgcr;
#ifdef CONFIG_DEBUG_FS
@@ -186,22 +220,62 @@ struct qcom_swrm_data {
u32 default_cols;
u32 default_rows;
bool sw_clk_gate_required;
+ u32 max_reg;
+ const unsigned int *reg_layout;
+};
+
+static const unsigned int swrm_v1_3_reg_layout[] = {
+ [SWRM_REG_FRAME_GEN_ENABLED] = SWRM_COMP_STATUS,
+ [SWRM_REG_INTERRUPT_STATUS] = SWRM_V1_3_INTERRUPT_STATUS,
+ [SWRM_REG_INTERRUPT_MASK_ADDR] = SWRM_V1_3_INTERRUPT_MASK_ADDR,
+ [SWRM_REG_INTERRUPT_CLEAR] = SWRM_V1_3_INTERRUPT_CLEAR,
+ [SWRM_REG_INTERRUPT_CPU_EN] = SWRM_V1_3_INTERRUPT_CPU_EN,
+ [SWRM_REG_CMD_FIFO_WR_CMD] = SWRM_V1_3_CMD_FIFO_WR_CMD,
+ [SWRM_REG_CMD_FIFO_RD_CMD] = SWRM_V1_3_CMD_FIFO_RD_CMD,
+ [SWRM_REG_CMD_FIFO_STATUS] = SWRM_V1_3_CMD_FIFO_STATUS,
+ [SWRM_REG_CMD_FIFO_RD_FIFO_ADDR] = SWRM_V1_3_CMD_FIFO_RD_FIFO_ADDR,
};
static const struct qcom_swrm_data swrm_v1_3_data = {
.default_rows = 48,
.default_cols = 16,
+ .max_reg = SWR_V1_3_MSTR_MAX_REG_ADDR,
+ .reg_layout = swrm_v1_3_reg_layout,
};
static const struct qcom_swrm_data swrm_v1_5_data = {
.default_rows = 50,
.default_cols = 16,
+ .max_reg = SWR_V1_3_MSTR_MAX_REG_ADDR,
+ .reg_layout = swrm_v1_3_reg_layout,
};
static const struct qcom_swrm_data swrm_v1_6_data = {
.default_rows = 50,
.default_cols = 16,
.sw_clk_gate_required = true,
+ .max_reg = SWR_V1_3_MSTR_MAX_REG_ADDR,
+ .reg_layout = swrm_v1_3_reg_layout,
+};
+
+static const unsigned int swrm_v2_0_reg_layout[] = {
+ [SWRM_REG_FRAME_GEN_ENABLED] = SWRM_V2_0_LINK_STATUS,
+ [SWRM_REG_INTERRUPT_STATUS] = SWRM_V2_0_INTERRUPT_STATUS,
+ [SWRM_REG_INTERRUPT_MASK_ADDR] = 0, /* Not present */
+ [SWRM_REG_INTERRUPT_CLEAR] = SWRM_V2_0_INTERRUPT_CLEAR,
+ [SWRM_REG_INTERRUPT_CPU_EN] = SWRM_V2_0_INTERRUPT_CPU_EN,
+ [SWRM_REG_CMD_FIFO_WR_CMD] = SWRM_V2_0_CMD_FIFO_WR_CMD,
+ [SWRM_REG_CMD_FIFO_RD_CMD] = SWRM_V2_0_CMD_FIFO_RD_CMD,
+ [SWRM_REG_CMD_FIFO_STATUS] = SWRM_V2_0_CMD_FIFO_STATUS,
+ [SWRM_REG_CMD_FIFO_RD_FIFO_ADDR] = SWRM_V2_0_CMD_FIFO_RD_FIFO_ADDR,
+};
+
+static const struct qcom_swrm_data swrm_v2_0_data = {
+ .default_rows = 50,
+ .default_cols = 16,
+ .sw_clk_gate_required = true,
+ .max_reg = SWR_V2_0_MSTR_MAX_REG_ADDR,
+ .reg_layout = swrm_v2_0_reg_layout,
};
#define to_qcom_sdw(b) container_of(b, struct qcom_swrm_ctrl, bus)
@@ -278,14 +352,15 @@ static u32 swrm_get_packed_reg_val(u8 *cmd_id, u8 cmd_data,
return val;
}
-static int swrm_wait_for_rd_fifo_avail(struct qcom_swrm_ctrl *swrm)
+static int swrm_wait_for_rd_fifo_avail(struct qcom_swrm_ctrl *ctrl)
{
u32 fifo_outstanding_data, value;
int fifo_retry_count = SWR_OVERFLOW_RETRY_COUNT;
do {
/* Check for fifo underflow during read */
- swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
+ ctrl->reg_read(ctrl, ctrl->reg_layout[SWRM_REG_CMD_FIFO_STATUS],
+ &value);
fifo_outstanding_data = FIELD_GET(SWRM_RD_CMD_FIFO_CNT_MASK, value);
/* Check if read data is available in read fifo */
@@ -296,39 +371,40 @@ static int swrm_wait_for_rd_fifo_avail(struct qcom_swrm_ctrl *swrm)
} while (fifo_retry_count--);
if (fifo_outstanding_data == 0) {
- dev_err_ratelimited(swrm->dev, "%s err read underflow\n", __func__);
+ dev_err_ratelimited(ctrl->dev, "%s err read underflow\n", __func__);
return -EIO;
}
return 0;
}
-static int swrm_wait_for_wr_fifo_avail(struct qcom_swrm_ctrl *swrm)
+static int swrm_wait_for_wr_fifo_avail(struct qcom_swrm_ctrl *ctrl)
{
u32 fifo_outstanding_cmds, value;
int fifo_retry_count = SWR_OVERFLOW_RETRY_COUNT;
do {
/* Check for fifo overflow during write */
- swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
+ ctrl->reg_read(ctrl, ctrl->reg_layout[SWRM_REG_CMD_FIFO_STATUS],
+ &value);
fifo_outstanding_cmds = FIELD_GET(SWRM_WR_CMD_FIFO_CNT_MASK, value);
/* Check for space in write fifo before writing */
- if (fifo_outstanding_cmds < swrm->wr_fifo_depth)
+ if (fifo_outstanding_cmds < ctrl->wr_fifo_depth)
return 0;
usleep_range(500, 510);
} while (fifo_retry_count--);
- if (fifo_outstanding_cmds == swrm->wr_fifo_depth) {
- dev_err_ratelimited(swrm->dev, "%s err write overflow\n", __func__);
+ if (fifo_outstanding_cmds == ctrl->wr_fifo_depth) {
+ dev_err_ratelimited(ctrl->dev, "%s err write overflow\n", __func__);
return -EIO;
}
return 0;
}
-static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *swrm, u8 cmd_data,
+static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *ctrl, u8 cmd_data,
u8 dev_addr, u16 reg_addr)
{
@@ -341,20 +417,20 @@ static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *swrm, u8 cmd_data,
val = swrm_get_packed_reg_val(&cmd_id, cmd_data,
dev_addr, reg_addr);
} else {
- val = swrm_get_packed_reg_val(&swrm->wcmd_id, cmd_data,
+ val = swrm_get_packed_reg_val(&ctrl->wcmd_id, cmd_data,
dev_addr, reg_addr);
}
- if (swrm_wait_for_wr_fifo_avail(swrm))
+ if (swrm_wait_for_wr_fifo_avail(ctrl))
return SDW_CMD_FAIL_OTHER;
if (cmd_id == SWR_BROADCAST_CMD_ID)
- reinit_completion(&swrm->broadcast);
+ reinit_completion(&ctrl->broadcast);
/* Its assumed that write is okay as we do not get any status back */
- swrm->reg_write(swrm, SWRM_CMD_FIFO_WR_CMD, val);
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_CMD_FIFO_WR_CMD], val);
- if (swrm->version <= SWRM_VERSION_1_3_0)
+ if (ctrl->version <= SWRM_VERSION_1_3_0)
usleep_range(150, 155);
if (cmd_id == SWR_BROADCAST_CMD_ID) {
@@ -362,7 +438,7 @@ static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *swrm, u8 cmd_data,
* sleep for 10ms for MSM soundwire variant to allow broadcast
* command to complete.
*/
- ret = wait_for_completion_timeout(&swrm->broadcast,
+ ret = wait_for_completion_timeout(&ctrl->broadcast,
msecs_to_jiffies(TIMEOUT_MS));
if (!ret)
ret = SDW_CMD_IGNORED;
@@ -375,41 +451,44 @@ static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *swrm, u8 cmd_data,
return ret;
}
-static int qcom_swrm_cmd_fifo_rd_cmd(struct qcom_swrm_ctrl *swrm,
+static int qcom_swrm_cmd_fifo_rd_cmd(struct qcom_swrm_ctrl *ctrl,
u8 dev_addr, u16 reg_addr,
u32 len, u8 *rval)
{
u32 cmd_data, cmd_id, val, retry_attempt = 0;
- val = swrm_get_packed_reg_val(&swrm->rcmd_id, len, dev_addr, reg_addr);
+ val = swrm_get_packed_reg_val(&ctrl->rcmd_id, len, dev_addr, reg_addr);
/*
* Check for outstanding cmd wrt. write fifo depth to avoid
* overflow as read will also increase write fifo cnt.
*/
- swrm_wait_for_wr_fifo_avail(swrm);
+ swrm_wait_for_wr_fifo_avail(ctrl);
/* wait for FIFO RD to complete to avoid overflow */
usleep_range(100, 105);
- swrm->reg_write(swrm, SWRM_CMD_FIFO_RD_CMD, val);
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_CMD_FIFO_RD_CMD], val);
/* wait for FIFO RD CMD complete to avoid overflow */
usleep_range(250, 255);
- if (swrm_wait_for_rd_fifo_avail(swrm))
+ if (swrm_wait_for_rd_fifo_avail(ctrl))
return SDW_CMD_FAIL_OTHER;
do {
- swrm->reg_read(swrm, SWRM_CMD_FIFO_RD_FIFO_ADDR, &cmd_data);
+ ctrl->reg_read(ctrl, ctrl->reg_layout[SWRM_REG_CMD_FIFO_RD_FIFO_ADDR],
+ &cmd_data);
rval[0] = cmd_data & 0xFF;
cmd_id = FIELD_GET(SWRM_RD_FIFO_CMD_ID_MASK, cmd_data);
- if (cmd_id != swrm->rcmd_id) {
+ if (cmd_id != ctrl->rcmd_id) {
if (retry_attempt < (MAX_FIFO_RD_RETRY - 1)) {
/* wait 500 us before retry on fifo read failure */
usleep_range(500, 505);
- swrm->reg_write(swrm, SWRM_CMD_FIFO_CMD,
+ ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CMD,
SWRM_CMD_FIFO_FLUSH);
- swrm->reg_write(swrm, SWRM_CMD_FIFO_RD_CMD, val);
+ ctrl->reg_write(ctrl,
+ ctrl->reg_layout[SWRM_REG_CMD_FIFO_RD_CMD],
+ val);
}
retry_attempt++;
} else {
@@ -418,9 +497,9 @@ static int qcom_swrm_cmd_fifo_rd_cmd(struct qcom_swrm_ctrl *swrm,
} while (retry_attempt < MAX_FIFO_RD_RETRY);
- dev_err(swrm->dev, "failed to read fifo: reg: 0x%x, rcmd_id: 0x%x,\
+ dev_err(ctrl->dev, "failed to read fifo: reg: 0x%x, rcmd_id: 0x%x,\
dev_num: 0x%x, cmd_data: 0x%x\n",
- reg_addr, swrm->rcmd_id, dev_addr, cmd_data);
+ reg_addr, ctrl->rcmd_id, dev_addr, cmd_data);
return SDW_CMD_IGNORED;
}
@@ -532,39 +611,40 @@ static int qcom_swrm_enumerate(struct sdw_bus *bus)
static irqreturn_t qcom_swrm_wake_irq_handler(int irq, void *dev_id)
{
- struct qcom_swrm_ctrl *swrm = dev_id;
+ struct qcom_swrm_ctrl *ctrl = dev_id;
int ret;
- ret = pm_runtime_resume_and_get(swrm->dev);
+ ret = pm_runtime_resume_and_get(ctrl->dev);
if (ret < 0 && ret != -EACCES) {
- dev_err_ratelimited(swrm->dev,
+ dev_err_ratelimited(ctrl->dev,
"pm_runtime_resume_and_get failed in %s, ret %d\n",
__func__, ret);
return ret;
}
- if (swrm->wake_irq > 0) {
- if (!irqd_irq_disabled(irq_get_irq_data(swrm->wake_irq)))
- disable_irq_nosync(swrm->wake_irq);
+ if (ctrl->wake_irq > 0) {
+ if (!irqd_irq_disabled(irq_get_irq_data(ctrl->wake_irq)))
+ disable_irq_nosync(ctrl->wake_irq);
}
- pm_runtime_mark_last_busy(swrm->dev);
- pm_runtime_put_autosuspend(swrm->dev);
+ pm_runtime_mark_last_busy(ctrl->dev);
+ pm_runtime_put_autosuspend(ctrl->dev);
return IRQ_HANDLED;
}
static irqreturn_t qcom_swrm_irq_handler(int irq, void *dev_id)
{
- struct qcom_swrm_ctrl *swrm = dev_id;
+ struct qcom_swrm_ctrl *ctrl = dev_id;
u32 value, intr_sts, intr_sts_masked, slave_status;
u32 i;
int devnum;
int ret = IRQ_HANDLED;
- clk_prepare_enable(swrm->hclk);
+ clk_prepare_enable(ctrl->hclk);
- swrm->reg_read(swrm, SWRM_INTERRUPT_STATUS, &intr_sts);
- intr_sts_masked = intr_sts & swrm->intr_mask;
+ ctrl->reg_read(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_STATUS],
+ &intr_sts);
+ intr_sts_masked = intr_sts & ctrl->intr_mask;
do {
for (i = 0; i < SWRM_INTERRUPT_MAX; i++) {
@@ -574,80 +654,92 @@ static irqreturn_t qcom_swrm_irq_handler(int irq, void *dev_id)
switch (value) {
case SWRM_INTERRUPT_STATUS_SLAVE_PEND_IRQ:
- devnum = qcom_swrm_get_alert_slave_dev_num(swrm);
+ devnum = qcom_swrm_get_alert_slave_dev_num(ctrl);
if (devnum < 0) {
- dev_err_ratelimited(swrm->dev,
+ dev_err_ratelimited(ctrl->dev,
"no slave alert found.spurious interrupt\n");
} else {
- sdw_handle_slave_status(&swrm->bus, swrm->status);
+ sdw_handle_slave_status(&ctrl->bus, ctrl->status);
}
break;
case SWRM_INTERRUPT_STATUS_NEW_SLAVE_ATTACHED:
case SWRM_INTERRUPT_STATUS_CHANGE_ENUM_SLAVE_STATUS:
- dev_dbg_ratelimited(swrm->dev, "SWR new slave attached\n");
- swrm->reg_read(swrm, SWRM_MCP_SLV_STATUS, &slave_status);
- if (swrm->slave_status == slave_status) {
- dev_dbg(swrm->dev, "Slave status not changed %x\n",
+ dev_dbg_ratelimited(ctrl->dev, "SWR new slave attached\n");
+ ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &slave_status);
+ if (ctrl->slave_status == slave_status) {
+ dev_dbg(ctrl->dev, "Slave status not changed %x\n",
slave_status);
} else {
- qcom_swrm_get_device_status(swrm);
- qcom_swrm_enumerate(&swrm->bus);
- sdw_handle_slave_status(&swrm->bus, swrm->status);
+ qcom_swrm_get_device_status(ctrl);
+ qcom_swrm_enumerate(&ctrl->bus);
+ sdw_handle_slave_status(&ctrl->bus, ctrl->status);
}
break;
case SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET:
- dev_err_ratelimited(swrm->dev,
+ dev_err_ratelimited(ctrl->dev,
"%s: SWR bus clsh detected\n",
__func__);
- swrm->intr_mask &= ~SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET;
- swrm->reg_write(swrm, SWRM_INTERRUPT_CPU_EN, swrm->intr_mask);
+ ctrl->intr_mask &= ~SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET;
+ ctrl->reg_write(ctrl,
+ ctrl->reg_layout[SWRM_REG_INTERRUPT_CPU_EN],
+ ctrl->intr_mask);
break;
case SWRM_INTERRUPT_STATUS_RD_FIFO_OVERFLOW:
- swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
- dev_err_ratelimited(swrm->dev,
+ ctrl->reg_read(ctrl,
+ ctrl->reg_layout[SWRM_REG_CMD_FIFO_STATUS],
+ &value);
+ dev_err_ratelimited(ctrl->dev,
"%s: SWR read FIFO overflow fifo status 0x%x\n",
__func__, value);
break;
case SWRM_INTERRUPT_STATUS_RD_FIFO_UNDERFLOW:
- swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
- dev_err_ratelimited(swrm->dev,
+ ctrl->reg_read(ctrl,
+ ctrl->reg_layout[SWRM_REG_CMD_FIFO_STATUS],
+ &value);
+ dev_err_ratelimited(ctrl->dev,
"%s: SWR read FIFO underflow fifo status 0x%x\n",
__func__, value);
break;
case SWRM_INTERRUPT_STATUS_WR_CMD_FIFO_OVERFLOW:
- swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
- dev_err(swrm->dev,
+ ctrl->reg_read(ctrl,
+ ctrl->reg_layout[SWRM_REG_CMD_FIFO_STATUS],
+ &value);
+ dev_err(ctrl->dev,
"%s: SWR write FIFO overflow fifo status %x\n",
__func__, value);
- swrm->reg_write(swrm, SWRM_CMD_FIFO_CMD, 0x1);
+ ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CMD, 0x1);
break;
case SWRM_INTERRUPT_STATUS_CMD_ERROR:
- swrm->reg_read(swrm, SWRM_CMD_FIFO_STATUS, &value);
- dev_err_ratelimited(swrm->dev,
+ ctrl->reg_read(ctrl,
+ ctrl->reg_layout[SWRM_REG_CMD_FIFO_STATUS],
+ &value);
+ dev_err_ratelimited(ctrl->dev,
"%s: SWR CMD error, fifo status 0x%x, flushing fifo\n",
__func__, value);
- swrm->reg_write(swrm, SWRM_CMD_FIFO_CMD, 0x1);
+ ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CMD, 0x1);
break;
case SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION:
- dev_err_ratelimited(swrm->dev,
+ dev_err_ratelimited(ctrl->dev,
"%s: SWR Port collision detected\n",
__func__);
- swrm->intr_mask &= ~SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION;
- swrm->reg_write(swrm,
- SWRM_INTERRUPT_CPU_EN, swrm->intr_mask);
+ ctrl->intr_mask &= ~SWRM_INTERRUPT_STATUS_DOUT_PORT_COLLISION;
+ ctrl->reg_write(ctrl,
+ ctrl->reg_layout[SWRM_REG_INTERRUPT_CPU_EN],
+ ctrl->intr_mask);
break;
case SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH:
- dev_err_ratelimited(swrm->dev,
+ dev_err_ratelimited(ctrl->dev,
"%s: SWR read enable valid mismatch\n",
__func__);
- swrm->intr_mask &=
+ ctrl->intr_mask &=
~SWRM_INTERRUPT_STATUS_READ_EN_RD_VALID_MISMATCH;
- swrm->reg_write(swrm,
- SWRM_INTERRUPT_CPU_EN, swrm->intr_mask);
+ ctrl->reg_write(ctrl,
+ ctrl->reg_layout[SWRM_REG_INTERRUPT_CPU_EN],
+ ctrl->intr_mask);
break;
case SWRM_INTERRUPT_STATUS_SPECIAL_CMD_ID_FINISHED:
- complete(&swrm->broadcast);
+ complete(&ctrl->broadcast);
break;
case SWRM_INTERRUPT_STATUS_BUS_RESET_FINISHED_V2:
break;
@@ -656,19 +748,21 @@ static irqreturn_t qcom_swrm_irq_handler(int irq, void *dev_id)
case SWRM_INTERRUPT_STATUS_EXT_CLK_STOP_WAKEUP:
break;
default:
- dev_err_ratelimited(swrm->dev,
+ dev_err_ratelimited(ctrl->dev,
"%s: SWR unknown interrupt value: %d\n",
__func__, value);
ret = IRQ_NONE;
break;
}
}
- swrm->reg_write(swrm, SWRM_INTERRUPT_CLEAR, intr_sts);
- swrm->reg_read(swrm, SWRM_INTERRUPT_STATUS, &intr_sts);
- intr_sts_masked = intr_sts & swrm->intr_mask;
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_CLEAR],
+ intr_sts);
+ ctrl->reg_read(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_STATUS],
+ &intr_sts);
+ intr_sts_masked = intr_sts & ctrl->intr_mask;
} while (intr_sts_masked);
- clk_disable_unprepare(swrm->hclk);
+ clk_disable_unprepare(ctrl->hclk);
return ret;
}
@@ -689,18 +783,23 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
ctrl->intr_mask = SWRM_INTERRUPT_STATUS_RMSK;
/* Mask soundwire interrupts */
- ctrl->reg_write(ctrl, SWRM_INTERRUPT_MASK_ADDR,
- SWRM_INTERRUPT_STATUS_RMSK);
+ if (ctrl->version < SWRM_VERSION_2_0_0)
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_MASK_ADDR],
+ SWRM_INTERRUPT_STATUS_RMSK);
/* Configure No pings */
ctrl->reg_read(ctrl, SWRM_MCP_CFG_ADDR, &val);
u32p_replace_bits(&val, SWRM_DEF_CMD_NO_PINGS, SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK);
ctrl->reg_write(ctrl, SWRM_MCP_CFG_ADDR, val);
- if (ctrl->version >= SWRM_VERSION_1_7_0) {
+ if (ctrl->version == SWRM_VERSION_1_7_0) {
ctrl->reg_write(ctrl, SWRM_LINK_MANAGER_EE, SWRM_EE_CPU);
ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL,
SWRM_MCP_BUS_CLK_START << SWRM_EE_CPU);
+ } else if (ctrl->version >= SWRM_VERSION_2_0_0) {
+ ctrl->reg_write(ctrl, SWRM_LINK_MANAGER_EE, SWRM_EE_CPU);
+ ctrl->reg_write(ctrl, SWRM_V2_0_CLK_CTRL,
+ SWRM_V2_0_CLK_CTRL_CLK_START);
} else {
ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL, SWRM_MCP_BUS_CLK_START);
}
@@ -722,7 +821,7 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
/* enable CPU IRQs */
if (ctrl->mmio) {
- ctrl->reg_write(ctrl, SWRM_INTERRUPT_CPU_EN,
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_CPU_EN],
SWRM_INTERRUPT_STATUS_RMSK);
}
ctrl->slave_status = 0;
@@ -806,12 +905,20 @@ static int qcom_swrm_transport_params(struct sdw_bus *bus,
value = pcfg->off1 << SWRM_DP_PORT_CTRL_OFFSET1_SHFT;
value |= pcfg->off2 << SWRM_DP_PORT_CTRL_OFFSET2_SHFT;
- value |= pcfg->si;
+ value |= pcfg->si & 0xff;
ret = ctrl->reg_write(ctrl, reg, value);
if (ret)
goto err;
+ if (pcfg->si > 0xff) {
+ value = (pcfg->si >> 8) & 0xff;
+ reg = SWRM_DP_SAMPLECTRL2_BANK(params->port_num, bank);
+ ret = ctrl->reg_write(ctrl, reg, value);
+ if (ret)
+ goto err;
+ }
+
if (pcfg->lane_control != SWR_INVALID_PARAM) {
reg = SWRM_DP_PORT_CTRL_2_BANK(params->port_num, bank);
value = pcfg->lane_control;
@@ -1185,7 +1292,7 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
struct device_node *np = ctrl->dev->of_node;
u8 off1[QCOM_SDW_MAX_PORTS];
u8 off2[QCOM_SDW_MAX_PORTS];
- u8 si[QCOM_SDW_MAX_PORTS];
+ u16 si[QCOM_SDW_MAX_PORTS];
u8 bp_mode[QCOM_SDW_MAX_PORTS] = { 0, };
u8 hstart[QCOM_SDW_MAX_PORTS];
u8 hstop[QCOM_SDW_MAX_PORTS];
@@ -1193,6 +1300,7 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
u8 blk_group_count[QCOM_SDW_MAX_PORTS];
u8 lane_control[QCOM_SDW_MAX_PORTS];
int i, ret, nports, val;
+ bool si_16 = false;
ctrl->reg_read(ctrl, SWRM_COMP_PARAMS, &val);
@@ -1236,9 +1344,14 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
return ret;
ret = of_property_read_u8_array(np, "qcom,ports-sinterval-low",
- si, nports);
- if (ret)
- return ret;
+ (u8 *)si, nports);
+ if (ret) {
+ ret = of_property_read_u16_array(np, "qcom,ports-sinterval",
+ si, nports);
+ if (ret)
+ return ret;
+ si_16 = true;
+ }
ret = of_property_read_u8_array(np, "qcom,ports-block-pack-mode",
bp_mode, nports);
@@ -1266,7 +1379,10 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
for (i = 0; i < nports; i++) {
/* Valid port number range is from 1-14 */
- ctrl->pconfig[i + 1].si = si[i];
+ if (si_16)
+ ctrl->pconfig[i + 1].si = si[i];
+ else
+ ctrl->pconfig[i + 1].si = ((u8 *)si)[i];
ctrl->pconfig[i + 1].off1 = off1[i];
ctrl->pconfig[i + 1].off2 = off2[i];
ctrl->pconfig[i + 1].bp_mode = bp_mode[i];
@@ -1283,23 +1399,23 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
#ifdef CONFIG_DEBUG_FS
static int swrm_reg_show(struct seq_file *s_file, void *data)
{
- struct qcom_swrm_ctrl *swrm = s_file->private;
+ struct qcom_swrm_ctrl *ctrl = s_file->private;
int reg, reg_val, ret;
- ret = pm_runtime_resume_and_get(swrm->dev);
+ ret = pm_runtime_resume_and_get(ctrl->dev);
if (ret < 0 && ret != -EACCES) {
- dev_err_ratelimited(swrm->dev,
+ dev_err_ratelimited(ctrl->dev,
"pm_runtime_resume_and_get failed in %s, ret %d\n",
__func__, ret);
return ret;
}
- for (reg = 0; reg <= SWR_MSTR_MAX_REG_ADDR; reg += 4) {
- swrm->reg_read(swrm, reg, &reg_val);
+ for (reg = 0; reg <= ctrl->max_reg; reg += 4) {
+ ctrl->reg_read(ctrl, reg, &reg_val);
seq_printf(s_file, "0x%.3x: 0x%.2x\n", reg, reg_val);
}
- pm_runtime_mark_last_busy(swrm->dev);
- pm_runtime_put_autosuspend(swrm->dev);
+ pm_runtime_mark_last_busy(ctrl->dev);
+ pm_runtime_put_autosuspend(ctrl->dev);
return 0;
@@ -1322,6 +1438,8 @@ static int qcom_swrm_probe(struct platform_device *pdev)
return -ENOMEM;
data = of_device_get_match_data(dev);
+ ctrl->max_reg = data->max_reg;
+ ctrl->reg_layout = data->reg_layout;
ctrl->rows_index = sdw_find_row_index(data->default_rows);
ctrl->cols_index = sdw_find_col_index(data->default_cols);
#if IS_REACHABLE(CONFIG_SLIMBUS)
@@ -1480,13 +1598,13 @@ static int qcom_swrm_remove(struct platform_device *pdev)
return 0;
}
-static bool swrm_wait_for_frame_gen_enabled(struct qcom_swrm_ctrl *swrm)
+static bool swrm_wait_for_frame_gen_enabled(struct qcom_swrm_ctrl *ctrl)
{
int retry = SWRM_LINK_STATUS_RETRY_CNT;
int comp_sts;
do {
- swrm->reg_read(swrm, SWRM_COMP_STATUS, &comp_sts);
+ ctrl->reg_read(ctrl, SWRM_COMP_STATUS, &comp_sts);
if (comp_sts & SWRM_FRM_GEN_ENABLED)
return true;
@@ -1494,7 +1612,7 @@ static bool swrm_wait_for_frame_gen_enabled(struct qcom_swrm_ctrl *swrm)
usleep_range(500, 510);
} while (retry--);
- dev_err(swrm->dev, "%s: link status not %s\n", __func__,
+ dev_err(ctrl->dev, "%s: link status not %s\n", __func__,
comp_sts & SWRM_FRM_GEN_ENABLED ? "connected" : "disconnected");
return false;
@@ -1531,19 +1649,27 @@ static int __maybe_unused swrm_runtime_resume(struct device *dev)
} else {
reset_control_reset(ctrl->audio_cgcr);
- if (ctrl->version >= SWRM_VERSION_1_7_0) {
+ if (ctrl->version == SWRM_VERSION_1_7_0) {
ctrl->reg_write(ctrl, SWRM_LINK_MANAGER_EE, SWRM_EE_CPU);
ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL,
SWRM_MCP_BUS_CLK_START << SWRM_EE_CPU);
+ } else if (ctrl->version >= SWRM_VERSION_2_0_0) {
+ ctrl->reg_write(ctrl, SWRM_LINK_MANAGER_EE, SWRM_EE_CPU);
+ ctrl->reg_write(ctrl, SWRM_V2_0_CLK_CTRL,
+ SWRM_V2_0_CLK_CTRL_CLK_START);
} else {
ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL, SWRM_MCP_BUS_CLK_START);
}
- ctrl->reg_write(ctrl, SWRM_INTERRUPT_CLEAR,
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_CLEAR],
SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET);
ctrl->intr_mask |= SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET;
- ctrl->reg_write(ctrl, SWRM_INTERRUPT_MASK_ADDR, ctrl->intr_mask);
- ctrl->reg_write(ctrl, SWRM_INTERRUPT_CPU_EN, ctrl->intr_mask);
+ if (ctrl->version < SWRM_VERSION_2_0_0)
+ ctrl->reg_write(ctrl,
+ ctrl->reg_layout[SWRM_REG_INTERRUPT_MASK_ADDR],
+ ctrl->intr_mask);
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_CPU_EN],
+ ctrl->intr_mask);
usleep_range(100, 105);
if (!swrm_wait_for_frame_gen_enabled(ctrl))
@@ -1565,8 +1691,12 @@ static int __maybe_unused swrm_runtime_suspend(struct device *dev)
if (!ctrl->clock_stop_not_supported) {
/* Mask bus clash interrupt */
ctrl->intr_mask &= ~SWRM_INTERRUPT_STATUS_MASTER_CLASH_DET;
- ctrl->reg_write(ctrl, SWRM_INTERRUPT_MASK_ADDR, ctrl->intr_mask);
- ctrl->reg_write(ctrl, SWRM_INTERRUPT_CPU_EN, ctrl->intr_mask);
+ if (ctrl->version < SWRM_VERSION_2_0_0)
+ ctrl->reg_write(ctrl,
+ ctrl->reg_layout[SWRM_REG_INTERRUPT_MASK_ADDR],
+ ctrl->intr_mask);
+ ctrl->reg_write(ctrl, ctrl->reg_layout[SWRM_REG_INTERRUPT_CPU_EN],
+ ctrl->intr_mask);
/* Prepare slaves for clock stop */
ret = sdw_bus_prep_clk_stop(&ctrl->bus);
if (ret < 0 && ret != -ENODATA) {
@@ -1602,6 +1732,7 @@ static const struct of_device_id qcom_swrm_of_match[] = {
{ .compatible = "qcom,soundwire-v1.5.1", .data = &swrm_v1_5_data },
{ .compatible = "qcom,soundwire-v1.6.0", .data = &swrm_v1_6_data },
{ .compatible = "qcom,soundwire-v1.7.0", .data = &swrm_v1_5_data },
+ { .compatible = "qcom,soundwire-v2.0.0", .data = &swrm_v2_0_data },
{/* sentinel */},
};
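
The recurring pattern in the qcom.c diff is a per-version register layout: a stable enum indexes an array of offsets, each SoC variant supplies its own array through match data, and callers never branch on the IP version for an ordinary register access. Reduced to its essentials (the MY_REG_* and my_v*_layout names are illustrative, not the driver's):

/* Layout-table technique: a logical register index maps to a
 * per-variant physical offset. Adding a variant means adding one
 * array, not touching every call site.
 */
enum {
	MY_REG_IRQ_STATUS,
	MY_REG_IRQ_CLEAR,
	MY_REG_MAX,
};

static const unsigned int my_v1_layout[MY_REG_MAX] = {
	[MY_REG_IRQ_STATUS] = 0x200,
	[MY_REG_IRQ_CLEAR]  = 0x208,
};

static const unsigned int my_v2_layout[MY_REG_MAX] = {
	[MY_REG_IRQ_STATUS] = 0x5000,
	[MY_REG_IRQ_CLEAR]  = 0x5008,
};

/* A caller then reads through the table it was handed, e.g.:
 * reg_read(ctrl, ctrl->reg_layout[MY_REG_IRQ_STATUS], &val);
 */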
diff --git a/drivers/spi/spi-dw-dma.c b/drivers/spi/spi-dw-dma.c
index ababb910b391..f19c092920a1 100644
--- a/drivers/spi/spi-dw-dma.c
+++ b/drivers/spi/spi-dw-dma.c
@@ -198,6 +198,20 @@ static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
return IRQ_HANDLED;
}
+static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
+{
+ switch (n_bytes) {
+ case 1:
+ return DMA_SLAVE_BUSWIDTH_1_BYTE;
+ case 2:
+ return DMA_SLAVE_BUSWIDTH_2_BYTES;
+ case 4:
+ return DMA_SLAVE_BUSWIDTH_4_BYTES;
+ default:
+ return DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ }
+}
+
static bool dw_spi_can_dma(struct spi_controller *master,
struct spi_device *spi, struct spi_transfer *xfer)
{
@@ -206,16 +220,6 @@ static bool dw_spi_can_dma(struct spi_controller *master,
return xfer->len > dws->fifo_len;
}
-static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
-{
- if (n_bytes == 1)
- return DMA_SLAVE_BUSWIDTH_1_BYTE;
- else if (n_bytes == 2)
- return DMA_SLAVE_BUSWIDTH_2_BYTES;
-
- return DMA_SLAVE_BUSWIDTH_UNDEFINED;
-}
-
static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
{
unsigned long long ms;
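
dw_spi_dma_convert_width() now covers 4-byte frames and funnels anything else to DMA_SLAVE_BUSWIDTH_UNDEFINED rather than quietly defaulting. Its result is the kind of value dma_slave_config expects; a hedged usage sketch (struct dw_spi comes from the driver's own header, and the channel is assumed already requested):

#include <linux/dmaengine.h>

/* Feed the converted width into the slave channel configuration;
 * an UNDEFINED width lets the dmaengine core reject the setup
 * instead of transferring with the wrong element size.
 */
static int config_chan(struct dma_chan *chan, struct dw_spi *dws)
{
	struct dma_slave_config cfg = {
		.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes),
		.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes),
	};

	return dmaengine_slave_config(chan, &cfg);
}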
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index f2341ab99556..d9f8231bcea8 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -303,6 +303,12 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
perclk_rate = clk_get_rate(fsl_lpspi->clk_per);
+ if (!config.speed_hz) {
+ dev_err(fsl_lpspi->dev,
+ "error: the transmission speed provided is 0!\n");
+ return -EINVAL;
+ }
+
if (config.speed_hz > perclk_rate / 2) {
dev_err(fsl_lpspi->dev,
"per-clk should be at least two times of transfer speed");
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index ba7be505ec4e..8a7d1c2eb4f0 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -35,7 +35,7 @@
#define CS_DEMUX_OUTPUT_SEL GENMASK(3, 0)
#define SE_SPI_TRANS_CFG 0x25c
-#define CS_TOGGLE BIT(0)
+#define CS_TOGGLE BIT(1)
#define SE_SPI_WORD_LEN 0x268
#define WORD_LEN_MSK GENMASK(9, 0)
diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c
index fab155389999..a3991e617c90 100644
--- a/drivers/spi/spi-qcom-qspi.c
+++ b/drivers/spi/spi-qcom-qspi.c
@@ -2,6 +2,8 @@
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
#include <linux/clk.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -62,6 +64,7 @@
#define WR_FIFO_FULL BIT(10)
#define WR_FIFO_OVERRUN BIT(11)
#define TRANSACTION_DONE BIT(16)
+#define DMA_CHAIN_DONE BIT(31)
#define QSPI_ERR_IRQS (RESP_FIFO_UNDERRUN | HRESP_FROM_NOC_ERR | \
WR_FIFO_OVERRUN)
#define QSPI_ALL_IRQS (QSPI_ERR_IRQS | RESP_FIFO_RDY | \
@@ -108,18 +111,34 @@
#define RD_FIFO_RESET 0x0030
#define RESET_FIFO BIT(0)
+#define NEXT_DMA_DESC_ADDR 0x0040
+#define CURRENT_DMA_DESC_ADDR 0x0044
+#define CURRENT_MEM_ADDR 0x0048
+
#define CUR_MEM_ADDR 0x0048
#define HW_VERSION 0x004c
#define RD_FIFO 0x0050
#define SAMPLING_CLK_CFG 0x0090
#define SAMPLING_CLK_STATUS 0x0094
+#define QSPI_ALIGN_REQ 32
enum qspi_dir {
QSPI_READ,
QSPI_WRITE,
};
+struct qspi_cmd_desc {
+ u32 data_address;
+ u32 next_descriptor;
+ u32 direction:1;
+ u32 multi_io_mode:3;
+ u32 reserved1:4;
+ u32 fragment:1;
+ u32 reserved2:7;
+ u32 length:16;
+};
+
struct qspi_xfer {
union {
const void *tx_buf;
@@ -137,11 +156,23 @@ enum qspi_clocks {
QSPI_NUM_CLKS
};
+/*
+ * Maximum number of entries in the sgt returned from the spi framework
+ * that will be supported. Can be modified as required.
+ * In practice, given max_dma_len is 64KB, the number of
+ * entries is not expected to exceed 1.
+ */
+#define QSPI_MAX_SG 5
+
struct qcom_qspi {
void __iomem *base;
struct device *dev;
struct clk_bulk_data *clks;
struct qspi_xfer xfer;
+ struct dma_pool *dma_cmd_pool;
+ dma_addr_t dma_cmd_desc[QSPI_MAX_SG];
+ void *virt_cmd_desc[QSPI_MAX_SG];
+ unsigned int n_cmd_desc;
struct icc_path *icc_path_cpu_to_qspi;
unsigned long last_speed;
/* Lock to protect data accessed by IRQs */
@@ -153,21 +184,22 @@ static u32 qspi_buswidth_to_iomode(struct qcom_qspi *ctrl,
{
switch (buswidth) {
case 1:
- return SDR_1BIT << MULTI_IO_MODE_SHFT;
+ return SDR_1BIT;
case 2:
- return SDR_2BIT << MULTI_IO_MODE_SHFT;
+ return SDR_2BIT;
case 4:
- return SDR_4BIT << MULTI_IO_MODE_SHFT;
+ return SDR_4BIT;
default:
dev_warn_once(ctrl->dev,
"Unexpected bus width: %u\n", buswidth);
- return SDR_1BIT << MULTI_IO_MODE_SHFT;
+ return SDR_1BIT;
}
}
static void qcom_qspi_pio_xfer_cfg(struct qcom_qspi *ctrl)
{
u32 pio_xfer_cfg;
+ u32 iomode;
const struct qspi_xfer *xfer;
xfer = &ctrl->xfer;
@@ -179,7 +211,8 @@ static void qcom_qspi_pio_xfer_cfg(struct qcom_qspi *ctrl)
else
pio_xfer_cfg |= TRANSFER_FRAGMENT;
pio_xfer_cfg &= ~MULTI_IO_MODE_MSK;
- pio_xfer_cfg |= qspi_buswidth_to_iomode(ctrl, xfer->buswidth);
+ iomode = qspi_buswidth_to_iomode(ctrl, xfer->buswidth);
+ pio_xfer_cfg |= iomode << MULTI_IO_MODE_SHFT;
writel(pio_xfer_cfg, ctrl->base + PIO_XFER_CFG);
}
@@ -217,12 +250,22 @@ static void qcom_qspi_pio_xfer(struct qcom_qspi *ctrl)
static void qcom_qspi_handle_err(struct spi_master *master,
struct spi_message *msg)
{
+ u32 int_status;
struct qcom_qspi *ctrl = spi_master_get_devdata(master);
unsigned long flags;
+ int i;
spin_lock_irqsave(&ctrl->lock, flags);
writel(0, ctrl->base + MSTR_INT_EN);
+ int_status = readl(ctrl->base + MSTR_INT_STATUS);
+ writel(int_status, ctrl->base + MSTR_INT_STATUS);
ctrl->xfer.rem_bytes = 0;
+
+ /* free cmd descriptors if they are around (DMA mode) */
+ for (i = 0; i < ctrl->n_cmd_desc; i++)
+ dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
+ ctrl->dma_cmd_desc[i]);
+ ctrl->n_cmd_desc = 0;
spin_unlock_irqrestore(&ctrl->lock, flags);
}
@@ -242,7 +285,7 @@ static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
}
/*
- * Set BW quota for CPU as driver supports FIFO mode only.
+ * Set BW quota for CPU.
* We don't have explicit peak requirement so keep it equal to avg_bw.
*/
avg_bw_cpu = Bps_to_icc(speed_hz);
@@ -258,6 +301,102 @@ static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
return 0;
}
+static int qcom_qspi_alloc_desc(struct qcom_qspi *ctrl, dma_addr_t dma_ptr,
+ uint32_t n_bytes)
+{
+ struct qspi_cmd_desc *virt_cmd_desc, *prev;
+ dma_addr_t dma_cmd_desc;
+
+ /* allocate a dma cmd descriptor */
+ virt_cmd_desc = dma_pool_alloc(ctrl->dma_cmd_pool, GFP_KERNEL | __GFP_ZERO, &dma_cmd_desc);
+ if (!virt_cmd_desc)
+ return -ENOMEM;
+
+ ctrl->virt_cmd_desc[ctrl->n_cmd_desc] = virt_cmd_desc;
+ ctrl->dma_cmd_desc[ctrl->n_cmd_desc] = dma_cmd_desc;
+ ctrl->n_cmd_desc++;
+
+ /* setup cmd descriptor */
+ virt_cmd_desc->data_address = dma_ptr;
+ virt_cmd_desc->direction = ctrl->xfer.dir;
+ virt_cmd_desc->multi_io_mode = qspi_buswidth_to_iomode(ctrl, ctrl->xfer.buswidth);
+ virt_cmd_desc->fragment = !ctrl->xfer.is_last;
+ virt_cmd_desc->length = n_bytes;
+
+ /* update previous descriptor */
+ if (ctrl->n_cmd_desc >= 2) {
+ prev = (ctrl->virt_cmd_desc)[ctrl->n_cmd_desc - 2];
+ prev->next_descriptor = dma_cmd_desc;
+ prev->fragment = 1;
+ }
+
+ return 0;
+}
+
+static int qcom_qspi_setup_dma_desc(struct qcom_qspi *ctrl,
+ struct spi_transfer *xfer)
+{
+ int ret;
+ struct sg_table *sgt;
+ dma_addr_t dma_ptr_sg;
+ unsigned int dma_len_sg;
+ int i;
+
+ if (ctrl->n_cmd_desc) {
+ dev_err(ctrl->dev, "Remnant dma buffers n_cmd_desc-%d\n", ctrl->n_cmd_desc);
+ return -EIO;
+ }
+
+ sgt = (ctrl->xfer.dir == QSPI_READ) ? &xfer->rx_sg : &xfer->tx_sg;
+ if (!sgt->nents || sgt->nents > QSPI_MAX_SG) {
+ dev_warn_once(ctrl->dev, "Cannot handle %d entries in scatter list\n", sgt->nents);
+ return -EAGAIN;
+ }
+
+ for (i = 0; i < sgt->nents; i++) {
+ dma_ptr_sg = sg_dma_address(sgt->sgl + i);
+ if (!IS_ALIGNED(dma_ptr_sg, QSPI_ALIGN_REQ)) {
+ dev_warn_once(ctrl->dev, "dma_address not aligned to %d\n", QSPI_ALIGN_REQ);
+ return -EAGAIN;
+ }
+ }
+
+ for (i = 0; i < sgt->nents; i++) {
+ dma_ptr_sg = sg_dma_address(sgt->sgl + i);
+ dma_len_sg = sg_dma_len(sgt->sgl + i);
+
+ ret = qcom_qspi_alloc_desc(ctrl, dma_ptr_sg, dma_len_sg);
+ if (ret)
+ goto cleanup;
+ }
+ return 0;
+
+cleanup:
+ for (i = 0; i < ctrl->n_cmd_desc; i++)
+ dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
+ ctrl->dma_cmd_desc[i]);
+ ctrl->n_cmd_desc = 0;
+ return ret;
+}
+
+static void qcom_qspi_dma_xfer(struct qcom_qspi *ctrl)
+{
+ /* Setup new interrupts */
+ writel(DMA_CHAIN_DONE, ctrl->base + MSTR_INT_EN);
+
+ /* kick off transfer */
+ writel((u32)((ctrl->dma_cmd_desc)[0]), ctrl->base + NEXT_DMA_DESC_ADDR);
+}
+
+/* Switch to DMA if transfer length exceeds this */
+#define QSPI_MAX_BYTES_FIFO 64
+
+static bool qcom_qspi_can_dma(struct spi_controller *ctlr,
+ struct spi_device *slv, struct spi_transfer *xfer)
+{
+ return xfer->len > QSPI_MAX_BYTES_FIFO;
+}
+
static int qcom_qspi_transfer_one(struct spi_master *master,
struct spi_device *slv,
struct spi_transfer *xfer)
@@ -266,6 +405,7 @@ static int qcom_qspi_transfer_one(struct spi_master *master,
int ret;
unsigned long speed_hz;
unsigned long flags;
+ u32 mstr_cfg;
speed_hz = slv->max_speed_hz;
if (xfer->speed_hz)
@@ -276,6 +416,7 @@ static int qcom_qspi_transfer_one(struct spi_master *master,
return ret;
spin_lock_irqsave(&ctrl->lock, flags);
+ mstr_cfg = readl(ctrl->base + MSTR_CONFIG);
/* We are half duplex, so either rx or tx will be set */
if (xfer->rx_buf) {
@@ -290,10 +431,36 @@ static int qcom_qspi_transfer_one(struct spi_master *master,
ctrl->xfer.is_last = list_is_last(&xfer->transfer_list,
&master->cur_msg->transfers);
ctrl->xfer.rem_bytes = xfer->len;
+
+ if (xfer->rx_sg.nents || xfer->tx_sg.nents) {
+ /* do DMA transfer */
+ if (!(mstr_cfg & DMA_ENABLE)) {
+ mstr_cfg |= DMA_ENABLE;
+ writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
+ }
+
+ ret = qcom_qspi_setup_dma_desc(ctrl, xfer);
+ if (ret != -EAGAIN) {
+ if (!ret)
+ qcom_qspi_dma_xfer(ctrl);
+ goto exit;
+ }
+ dev_warn_once(ctrl->dev, "DMA failure, falling back to PIO");
+ ret = 0; /* We'll retry w/ PIO */
+ }
+
+ if (mstr_cfg & DMA_ENABLE) {
+ mstr_cfg &= ~DMA_ENABLE;
+ writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
+ }
qcom_qspi_pio_xfer(ctrl);
+exit:
spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (ret)
+ return ret;
+
/* We'll call spi_finalize_current_transfer() when done */
return 1;
}
@@ -328,6 +495,16 @@ static int qcom_qspi_prepare_message(struct spi_master *master,
return 0;
}
+static int qcom_qspi_alloc_dma(struct qcom_qspi *ctrl)
+{
+ ctrl->dma_cmd_pool = dmam_pool_create("qspi cmd desc pool",
+ ctrl->dev, sizeof(struct qspi_cmd_desc), 0, 0);
+ if (!ctrl->dma_cmd_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
static irqreturn_t pio_read(struct qcom_qspi *ctrl)
{
u32 rd_fifo_status;
@@ -426,6 +603,7 @@ static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
int_status = readl(ctrl->base + MSTR_INT_STATUS);
writel(int_status, ctrl->base + MSTR_INT_STATUS);
+ /* PIO mode handling */
if (ctrl->xfer.dir == QSPI_WRITE) {
if (int_status & WR_FIFO_EMPTY)
ret = pio_write(ctrl);
@@ -449,6 +627,22 @@ static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
}
+ /* DMA mode handling */
+ if (int_status & DMA_CHAIN_DONE) {
+ int i;
+
+ writel(0, ctrl->base + MSTR_INT_EN);
+ ctrl->xfer.rem_bytes = 0;
+
+ for (i = 0; i < ctrl->n_cmd_desc; i++)
+ dma_pool_free(ctrl->dma_cmd_pool, ctrl->virt_cmd_desc[i],
+ ctrl->dma_cmd_desc[i]);
+ ctrl->n_cmd_desc = 0;
+
+ ret = IRQ_HANDLED;
+ spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
+ }
+
spin_unlock(&ctrl->lock);
return ret;
}
@@ -517,7 +711,13 @@ static int qcom_qspi_probe(struct platform_device *pdev)
return ret;
}
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return dev_err_probe(dev, ret, "could not set DMA mask\n");
+
master->max_speed_hz = 300000000;
+ master->max_dma_len = 65536; /* as per HPG */
+ master->dma_alignment = QSPI_ALIGN_REQ;
master->num_chipselect = QSPI_NUM_CS;
master->bus_num = -1;
master->dev.of_node = pdev->dev.of_node;
@@ -528,6 +728,8 @@ static int qcom_qspi_probe(struct platform_device *pdev)
master->prepare_message = qcom_qspi_prepare_message;
master->transfer_one = qcom_qspi_transfer_one;
master->handle_err = qcom_qspi_handle_err;
+ if (of_property_read_bool(pdev->dev.of_node, "iommus"))
+ master->can_dma = qcom_qspi_can_dma;
master->auto_runtime_pm = true;
ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
@@ -540,6 +742,10 @@ static int qcom_qspi_probe(struct platform_device *pdev)
return ret;
}
+ ret = qcom_qspi_alloc_dma(ctrl);
+ if (ret)
+ return ret;
+
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 250);
pm_runtime_enable(dev);
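
The qcom-qspi hunks above commit to DMA only after a validation pass over the scatterlist; qcom_qspi_setup_dma_desc() returns -EAGAIN for unaligned or oversized lists, and transfer_one treats that as "retry the same transfer in PIO mode". A minimal standalone model of that soft-failure shape, a sketch only — every helper name below is ours, not the driver's:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for qcom_qspi_setup_dma_desc(): -EAGAIN means "DMA cannot
     * handle this transfer, fall back to PIO"; any other error is fatal. */
    static int setup_dma(int aligned)
    {
            return aligned ? 0 : -EAGAIN;
    }

    static const char *transfer(int aligned)
    {
            int ret = setup_dma(aligned);

            if (ret != -EAGAIN)
                    return ret ? "error" : "dma";
            return "pio";    /* soft failure: redo the transfer in PIO mode */
    }

    int main(void)
    {
            printf("%s %s\n", transfer(1), transfer(0));    /* dma pio */
            return 0;
    }
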
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 7ac17f0d18a9..f79732ff3a27 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -19,7 +19,6 @@
#include <linux/platform_data/spi-s3c64xx.h>
#define MAX_SPI_PORTS 12
-#define S3C64XX_SPI_QUIRK_POLL (1 << 0)
#define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1)
#define AUTOSUSPEND_TIMEOUT 2000
@@ -59,6 +58,8 @@
#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
+#define S3C64XX_SPI_MODE_RX_RDY_LVL GENMASK(16, 11)
+#define S3C64XX_SPI_MODE_RX_RDY_LVL_SHIFT 11
#define S3C64XX_SPI_MODE_SELF_LOOPBACK (1<<3)
#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
@@ -115,8 +116,10 @@
#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
+#define S3C64XX_SPI_POLLING_SIZE 32
+
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
-#define is_polling(x) (x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
+#define is_polling(x) (x->cntrlr_info->polling)
#define RXBUSY (1<<2)
#define TXBUSY (1<<3)
@@ -553,7 +556,7 @@ static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
}
static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
- struct spi_transfer *xfer)
+ struct spi_transfer *xfer, bool use_irq)
{
void __iomem *regs = sdd->regs;
unsigned long val;
@@ -562,11 +565,24 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
u32 cpy_len;
u8 *buf;
int ms;
+ unsigned long time_us;
- /* millisecs to xfer 'len' bytes @ 'cur_speed' */
- ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+ /* microsecs to xfer 'len' bytes @ 'cur_speed' */
+ time_us = (xfer->len * 8 * 1000 * 1000) / sdd->cur_speed;
+ ms = (time_us / 1000);
ms += 10; /* some tolerance */
+ /* sleep for roughly the time the transfer takes on the wire */
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ if (RX_FIFO_LVL(status, sdd) < xfer->len)
+ usleep_range(time_us / 2, time_us);
+
+ if (use_irq) {
+ val = msecs_to_jiffies(ms);
+ if (!wait_for_completion_timeout(&sdd->xfer_completion, val))
+ return -EIO;
+ }
+
val = msecs_to_loops(ms);
do {
status = readl(regs + S3C64XX_SPI_STATUS);
@@ -729,10 +745,13 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
void *rx_buf = NULL;
int target_len = 0, origin_len = 0;
int use_dma = 0;
+ bool use_irq = false;
int status;
u32 speed;
u8 bpw;
unsigned long flags;
+ u32 rdy_lv;
+ u32 val;
reinit_completion(&sdd->xfer_completion);
@@ -753,17 +772,46 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
sdd->rx_dma.ch && sdd->tx_dma.ch) {
use_dma = 1;
- } else if (xfer->len > fifo_len) {
+ } else if (xfer->len >= fifo_len) {
tx_buf = xfer->tx_buf;
rx_buf = xfer->rx_buf;
origin_len = xfer->len;
-
target_len = xfer->len;
- if (xfer->len > fifo_len)
- xfer->len = fifo_len;
+ xfer->len = fifo_len - 1;
}
do {
+ /* If the transfer exceeds the polling size (32 bytes), switch to IRQ mode */
+ if (xfer->len > S3C64XX_SPI_POLLING_SIZE)
+ use_irq = true;
+
+ if (use_irq) {
+ reinit_completion(&sdd->xfer_completion);
+
+ rdy_lv = xfer->len;
+ /*
+ * Set up the RDY_FIFO trigger level. The hardware scales the
+ * programmed value with the FIFO depth:
+ * FIFO up to 64 bytes -> interrupt at RDY_LVL bytes
+ * 128-byte FIFO -> interrupt at RDY_LVL * 2 bytes
+ * 256-byte FIFO -> interrupt at RDY_LVL * 4 bytes
+ */
+ if (fifo_len == 128)
+ rdy_lv /= 2;
+ else if (fifo_len == 256)
+ rdy_lv /= 4;
+
+ val = readl(sdd->regs + S3C64XX_SPI_MODE_CFG);
+ val &= ~S3C64XX_SPI_MODE_RX_RDY_LVL;
+ val |= (rdy_lv << S3C64XX_SPI_MODE_RX_RDY_LVL_SHIFT);
+ writel(val, sdd->regs + S3C64XX_SPI_MODE_CFG);
+
+ /* Enable FIFO_RDY_EN IRQ */
+ val = readl(sdd->regs + S3C64XX_SPI_INT_EN);
+ writel((val | S3C64XX_SPI_INT_RX_FIFORDY_EN),
+ sdd->regs + S3C64XX_SPI_INT_EN);
+
+ }
+
spin_lock_irqsave(&sdd->lock, flags);
/* Pending only which is to be done */
@@ -785,7 +833,7 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
if (use_dma)
status = s3c64xx_wait_for_dma(sdd, xfer);
else
- status = s3c64xx_wait_for_pio(sdd, xfer);
+ status = s3c64xx_wait_for_pio(sdd, xfer, use_irq);
if (status) {
dev_err(&spi->dev,
@@ -824,8 +872,8 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
if (xfer->rx_buf)
xfer->rx_buf += xfer->len;
- if (target_len > fifo_len)
- xfer->len = fifo_len;
+ if (target_len >= fifo_len)
+ xfer->len = fifo_len - 1;
else
xfer->len = target_len;
}
@@ -995,6 +1043,14 @@ static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
dev_err(&spi->dev, "TX underrun\n");
}
+ if (val & S3C64XX_SPI_ST_RX_FIFORDY) {
+ complete(&sdd->xfer_completion);
+ /* No pending-clear bit for this irq; mask S3C64XX_SPI_INT_RX_FIFORDY_EN instead */
+ val = readl(sdd->regs + S3C64XX_SPI_INT_EN);
+ writel((val & ~S3C64XX_SPI_INT_RX_FIFORDY_EN),
+ sdd->regs + S3C64XX_SPI_INT_EN);
+ }
+
/* Clear the pending irq by setting and then clearing it */
writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
@@ -1068,6 +1124,7 @@ static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
}
sci->no_cs = of_property_read_bool(dev->of_node, "no-cs-readback");
+ sci->polling = !of_property_present(dev->of_node, "dmas");
return sci;
}
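
The RDY-level programming above divides the requested byte count by the FIFO scaling factor before writing it to MODE_CFG. A tiny standalone check of that arithmetic, mirroring the comment in the hunk (the helper name is ours, not the driver's):

    #include <assert.h>

    /* FIFOs up to 64 bytes take the count as-is; the hardware multiplies
     * the programmed level by 2 on 128-byte FIFOs and by 4 on 256-byte
     * FIFOs, so the driver divides accordingly. */
    static unsigned int rdy_lvl(unsigned int len, unsigned int fifo_len)
    {
            if (fifo_len == 128)
                    return len / 2;
            if (fifo_len == 256)
                    return len / 4;
            return len;
    }

    int main(void)
    {
            assert(rdy_lvl(60, 64) == 60);
            assert(rdy_lvl(120, 128) == 60);
            assert(rdy_lvl(240, 256) == 60);
            return 0;
    }
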
diff --git a/drivers/staging/octeon/TODO b/drivers/staging/octeon/TODO
index 67a0a1f6b922..044e48e3d65f 100644
--- a/drivers/staging/octeon/TODO
+++ b/drivers/staging/octeon/TODO
@@ -6,4 +6,3 @@ TODO:
- make driver self-contained instead of being split between staging and
arch/mips/cavium-octeon.
-Contact: Aaro Koskinen <aaro.koskinen@iki.fi>
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 17d7bb875fee..8d8af3c55b07 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -98,7 +98,7 @@
/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
-/* UFSHC 4.0 compliant HC support this mode, refer param_set_mcq_mode() */
+/* Host controllers compliant with UFSHCI 4.0 support this mode. */
static bool use_mcq_mode = true;
static bool is_mcq_supported(struct ufs_hba *hba)
@@ -106,23 +106,7 @@ static bool is_mcq_supported(struct ufs_hba *hba)
return hba->mcq_sup && use_mcq_mode;
}
-static int param_set_mcq_mode(const char *val, const struct kernel_param *kp)
-{
- int ret;
-
- ret = param_set_bool(val, kp);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static const struct kernel_param_ops mcq_mode_ops = {
- .set = param_set_mcq_mode,
- .get = param_get_bool,
-};
-
-module_param_cb(use_mcq_mode, &mcq_mode_ops, &use_mcq_mode, 0644);
+module_param(use_mcq_mode, bool, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
@@ -9459,8 +9443,16 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* that performance might be impacted.
*/
ret = ufshcd_urgent_bkops(hba);
- if (ret)
+ if (ret) {
+ /*
+ * If return err in suspend flow, IO will hang.
+ * Trigger error handler and break suspend for
+ * error recovery.
+ */
+ ufshcd_force_error_recovery(hba);
+ ret = -EBUSY;
goto enable_scaling;
+ }
} else {
/* make sure that auto bkops is disabled */
ufshcd_disable_auto_bkops(hba);
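
The module-parameter hunk above works because the removed callback only forwarded to param_set_bool() without adding any behavior, so the stock bool handler is equivalent. A hedged kernel-module fragment showing the two spellings side by side:

    #include <linux/module.h>

    static bool use_mcq_mode = true;

    /* old form: custom ops whose .set just called param_set_bool()
     *   module_param_cb(use_mcq_mode, &mcq_mode_ops, &use_mcq_mode, 0644);
     * new form: identical sysfs behavior with less code
     */
    module_param(use_mcq_mode, bool, 0644);
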
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 73e217260390..a054810e321d 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -410,9 +410,6 @@ static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
usleep_range(100, 200);
} while (ktime_before(time_checked, timeout));
- if (val == state)
- return 0;
-
return -ETIMEDOUT;
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 07181cd8d52e..ae2273196b0c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -935,13 +935,18 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
+ bool retry = err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;
+
if (zcopy_used) {
if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
vhost_net_ubuf_put(ubufs);
- nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
- % UIO_MAXIOV;
+ if (retry)
+ nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
+ % UIO_MAXIOV;
+ else
+ vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
}
- if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
+ if (retry) {
vhost_discard_vq_desc(vq, 1);
vhost_net_enable_vq(net, vq);
break;
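
The vhost-net hunk above first classifies the sendmsg() error: only transient errors requeue the descriptor and rewind the zerocopy slot, while permanent ones complete the buffer as done so it is reclaimed. A standalone model of that classification (the function name is ours):

    #include <assert.h>
    #include <errno.h>
    #include <stdbool.h>

    /* Transient tx errors worth retrying; everything else is final. */
    static bool tx_retryable(int err)
    {
            return err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;
    }

    int main(void)
    {
            assert(tx_retryable(-EAGAIN) && tx_retryable(-ENOBUFS));
            assert(!tx_retryable(-EIO));    /* complete, do not requeue */
            return 0;
    }
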
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 8c1aefc865f0..bf77924d5b60 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -407,7 +407,10 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_dev *d = &v->vdev;
+ u64 actual_features;
u64 features;
+ int i;
/*
* It's not allowed to change the features after they have
@@ -422,6 +425,16 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
if (vdpa_set_features(vdpa, features))
return -EINVAL;
+ /* let the vqs know what has been configured */
+ actual_features = ops->get_driver_features(vdpa);
+ for (i = 0; i < d->nvqs; ++i) {
+ struct vhost_virtqueue *vq = d->vqs[i];
+
+ mutex_lock(&vq->mutex);
+ vq->acked_features = actual_features;
+ mutex_unlock(&vq->mutex);
+ }
+
return 0;
}
@@ -594,7 +607,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
if (r)
return r;
- vq->last_avail_idx = vq_state.split.avail_index;
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
+ vq->last_avail_idx = vq_state.packed.last_avail_idx |
+ (vq_state.packed.last_avail_counter << 15);
+ vq->last_used_idx = vq_state.packed.last_used_idx |
+ (vq_state.packed.last_used_counter << 15);
+ } else {
+ vq->last_avail_idx = vq_state.split.avail_index;
+ }
break;
}
@@ -612,9 +632,15 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
break;
case VHOST_SET_VRING_BASE:
- vq_state.split.avail_index = vq->last_avail_idx;
- if (ops->set_vq_state(vdpa, idx, &vq_state))
- r = -EINVAL;
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
+ vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
+ vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
+ vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff;
+ vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000);
+ } else {
+ vq_state.split.avail_index = vq->last_avail_idx;
+ }
+ r = ops->set_vq_state(vdpa, idx, &vq_state);
break;
case VHOST_SET_VRING_CALL:
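
The packed-ring state handling above stores a 15-bit ring index plus a wrap counter in bit 15 of each u16, matching the virtio packed-ring layout. A standalone round-trip check of that encoding (helper names are ours):

    #include <assert.h>
    #include <stdint.h>

    static uint16_t pack_pos(uint16_t idx, int wrap)
    {
            return (uint16_t)((idx & 0x7fff) | ((!!wrap) << 15));
    }

    int main(void)
    {
            uint16_t v = pack_pos(0x1234, 1);

            assert((v & 0x7fff) == 0x1234);    /* last_avail_idx */
            assert((v & 0x8000) != 0);         /* last_avail_counter */
            return 0;
    }
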
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index a92af08e7864..e2953927240e 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1626,17 +1626,25 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
r = -EFAULT;
break;
}
- if (s.num > 0xffff) {
- r = -EINVAL;
- break;
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
+ vq->last_avail_idx = s.num & 0xffff;
+ vq->last_used_idx = (s.num >> 16) & 0xffff;
+ } else {
+ if (s.num > 0xffff) {
+ r = -EINVAL;
+ break;
+ }
+ vq->last_avail_idx = s.num;
}
- vq->last_avail_idx = s.num;
/* Forget the cached index value. */
vq->avail_idx = vq->last_avail_idx;
break;
case VHOST_GET_VRING_BASE:
s.index = idx;
- s.num = vq->last_avail_idx;
+ if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
+ s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16);
+ else
+ s.num = vq->last_avail_idx;
if (copy_to_user(argp, &s, sizeof s))
r = -EFAULT;
break;
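
For packed rings, VHOST_GET/SET_VRING_BASE above squeeze both positions (each already carrying its wrap counter in bit 15) into the single u32 s.num: last_avail in bits 0-15, last_used in bits 16-31. A standalone round trip (names are ours):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t pack_base(uint16_t avail, uint16_t used)
    {
            return (uint32_t)avail | ((uint32_t)used << 16);
    }

    int main(void)
    {
            uint32_t num = pack_base(0x8005, 0x0007);

            assert((num & 0xffff) == 0x8005);    /* last_avail_idx + wrap */
            assert((num >> 16) == 0x0007);       /* last_used_idx */
            return 0;
    }
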
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 0308638cdeee..56b016484e0a 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -92,13 +92,17 @@ struct vhost_virtqueue {
/* The routine to call when the Guest pings us, or timeout. */
vhost_work_fn_t handle_kick;
- /* Last available index we saw. */
+ /* Last available index we saw.
+ * Values are limited to 0x7fff, and the high bit is used as
+ * a wrap counter when using VIRTIO_F_RING_PACKED. */
u16 last_avail_idx;
/* Caches available index value from user. */
u16 avail_idx;
- /* Last index we used. */
+ /* Last index we used.
+ * Values are limited to 0x7fff, and the high bit is used as
+ * a wrap counter when using VIRTIO_F_RING_PACKED. */
u16 last_used_idx;
/* Used flags */
diff --git a/drivers/virt/acrn/ioreq.c b/drivers/virt/acrn/ioreq.c
index d75ab3f66da4..cecdc1c13af7 100644
--- a/drivers/virt/acrn/ioreq.c
+++ b/drivers/virt/acrn/ioreq.c
@@ -576,8 +576,8 @@ static void ioreq_resume(void)
int acrn_ioreq_intr_setup(void)
{
acrn_setup_intr_handler(ioreq_intr_handler);
- ioreq_wq = alloc_workqueue("ioreq_wq",
- WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+ ioreq_wq = alloc_ordered_workqueue("ioreq_wq",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM);
if (!ioreq_wq) {
dev_err(acrn_dev.this_device, "Failed to alloc workqueue!\n");
acrn_remove_intr_handler();
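
The conversion above (repeated in the pvcalls hunks below) relies on alloc_ordered_workqueue() being the documented shorthand for an unbound workqueue with max_active limited to 1. A hedged kernel-only fragment of the two equivalent spellings:

    #include <linux/workqueue.h>

    static struct workqueue_struct *make_ioreq_wq(void)
    {
            /* equivalent old form:
             * alloc_workqueue("ioreq_wq",
             *                 WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
             */
            return alloc_ordered_workqueue("ioreq_wq",
                                           WQ_HIGHPRI | WQ_MEM_RECLAIM);
    }
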
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 1f5219e12cc3..b41516f3f84a 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -361,7 +361,7 @@ static struct sock_mapping *pvcalls_new_active_socket(
map->data.in = map->bytes;
map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);
- map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1);
+ map->ioworker.wq = alloc_ordered_workqueue("pvcalls_io", 0);
if (!map->ioworker.wq)
goto out;
atomic_set(&map->io, 1);
@@ -637,7 +637,7 @@ static int pvcalls_back_bind(struct xenbus_device *dev,
INIT_WORK(&map->register_work, __pvcalls_back_accept);
spin_lock_init(&map->copy_lock);
- map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
+ map->wq = alloc_ordered_workqueue("pvcalls_wq", 0);
if (!map->wq) {
ret = -ENOMEM;
goto out;
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 06a2514f0d88..698c43dd5dc8 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -108,7 +108,7 @@ enum p9_cache_bits {
struct v9fs_session_info {
/* options */
- unsigned char flags;
+ unsigned int flags;
unsigned char nodev;
unsigned short debug;
unsigned int afid;
diff --git a/fs/aio.c b/fs/aio.c
index b0b17bd098bb..4a7576989719 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -558,7 +558,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size,
PROT_READ | PROT_WRITE,
- MAP_SHARED, 0, &unused, NULL);
+ MAP_SHARED, 0, 0, &unused, NULL);
mmap_write_unlock(mm);
if (IS_ERR((void *)ctx->mmap_base)) {
ctx->mmap_size = 0;
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index e54f0884802a..787417f9893c 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -45,7 +45,8 @@ static int check_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
int root_count;
bool cached;
- if (!btrfs_file_extent_compression(eb, fi) &&
+ if (!ctx->ignore_extent_item_pos &&
+ !btrfs_file_extent_compression(eb, fi) &&
!btrfs_file_extent_encryption(eb, fi) &&
!btrfs_file_extent_other_encoding(eb, fi)) {
u64 data_offset;
@@ -552,13 +553,10 @@ static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
count++;
else
goto next;
- if (!ctx->ignore_extent_item_pos) {
- ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
- if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
- ret < 0)
- break;
- }
- if (ret > 0)
+ ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
+ if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
+ break;
+ else if (ret > 0)
goto next;
ret = ulist_add_merge_ptr(parents, eb->start,
eie, (void **)&old, GFP_NOFS);
@@ -1606,8 +1604,7 @@ again:
goto out;
}
if (ref->count && ref->parent) {
- if (!ctx->ignore_extent_item_pos && !ref->inode_list &&
- ref->level == 0) {
+ if (!ref->inode_list && ref->level == 0) {
struct btrfs_tree_parent_check check = { 0 };
struct extent_buffer *eb;
diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
index 3ab707e26fa2..ac18c43fadad 100644
--- a/fs/btrfs/block-rsv.c
+++ b/fs/btrfs/block-rsv.c
@@ -124,7 +124,8 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
} else {
num_bytes = 0;
}
- if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
+ if (qgroup_to_release_ret &&
+ block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
qgroup_to_release = block_rsv->qgroup_rsv_reserved -
block_rsv->qgroup_rsv_size;
block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 3c983c70028a..2ff2961b1183 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2627,6 +2627,10 @@ static bool check_sibling_keys(struct extent_buffer *left,
}
if (btrfs_comp_cpu_keys(&left_last, &right_first) >= 0) {
+ btrfs_crit(left->fs_info, "left extent buffer:");
+ btrfs_print_tree(left, false);
+ btrfs_crit(left->fs_info, "right extent buffer:");
+ btrfs_print_tree(right, false);
btrfs_crit(left->fs_info,
"bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
left_last.objectid, left_last.type,
@@ -3215,6 +3219,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
if (check_sibling_keys(left, right)) {
ret = -EUCLEAN;
+ btrfs_abort_transaction(trans, ret);
btrfs_tree_unlock(right);
free_extent_buffer(right);
return ret;
@@ -3433,6 +3438,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
if (check_sibling_keys(left, right)) {
ret = -EUCLEAN;
+ btrfs_abort_transaction(trans, ret);
goto out;
}
return __push_leaf_left(trans, path, min_data_size, empty, left,
@@ -4478,10 +4484,12 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
struct btrfs_key key;
+ struct btrfs_key orig_key;
struct btrfs_disk_key found_key;
int ret;
btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
+ orig_key = key;
if (key.offset > 0) {
key.offset--;
@@ -4498,8 +4506,36 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
btrfs_release_path(path);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
- if (ret < 0)
+ if (ret <= 0)
return ret;
+
+ /*
+ * Previous key not found. Even if we were at slot 0 of the leaf we had
+ * before releasing the path and calling btrfs_search_slot(), we now may
+ * be in a slot pointing to the same original key - this can happen if
+ * after we released the path, one or more items were moved from a
+ * sibling leaf into the front of the leaf we had due to an insertion
+ * (see push_leaf_right()).
+ * If we hit this case and our slot is > 0, just decrement the slot
+ * so that the caller does not process the same key again, which may or
+ * may not break the caller, depending on its logic.
+ */
+ if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
+ btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
+ ret = comp_keys(&found_key, &orig_key);
+ if (ret == 0) {
+ if (path->slots[0] > 0) {
+ path->slots[0]--;
+ return 0;
+ }
+ /*
+ * At slot 0, same key as before, it means orig_key is
+ * the lowest, leftmost, key in the tree. We're done.
+ */
+ return 1;
+ }
+ }
+
btrfs_item_key(path->nodes[0], &found_key, 0);
ret = comp_keys(&found_key, &key);
/*
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 018c711a0bc8..cd4cce9ba443 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -52,13 +52,13 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz
u64 start, end, i_size;
int ret;
+ spin_lock(&inode->lock);
i_size = new_i_size ?: i_size_read(&inode->vfs_inode);
if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
inode->disk_i_size = i_size;
- return;
+ goto out_unlock;
}
- spin_lock(&inode->lock);
ret = find_contiguous_extent_bit(&inode->file_extent_tree, 0, &start,
&end, EXTENT_DIRTY);
if (!ret && start == 0)
@@ -66,6 +66,7 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz
else
i_size = 0;
inode->disk_i_size = i_size;
+out_unlock:
spin_unlock(&inode->lock);
}
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 25833b4eeaf5..2fa36f694daa 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -454,7 +454,9 @@ void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
case BTRFS_EXCLOP_BALANCE_PAUSED:
spin_lock(&fs_info->super_lock);
ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE ||
- fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD);
+ fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD ||
+ fs_info->exclusive_operation == BTRFS_EXCLOP_NONE ||
+ fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE_PAUSED;
spin_unlock(&fs_info->super_lock);
break;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 6cb97efee976..0f2f915e42b0 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -826,7 +826,12 @@ out:
!btrfs_test_opt(info, CLEAR_CACHE)) {
btrfs_err(info, "cannot disable free space tree");
ret = -EINVAL;
-
+ }
+ if (btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE) &&
+ (btrfs_test_opt(info, CLEAR_CACHE) ||
+ !btrfs_test_opt(info, FREE_SPACE_TREE))) {
+ btrfs_err(info, "cannot disable free space tree with block-group-tree feature");
+ ret = -EINVAL;
}
if (!ret)
ret = btrfs_check_mountopts_zoned(info);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 03f52e4a20aa..841e799dece5 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -395,6 +395,7 @@ void btrfs_free_device(struct btrfs_device *device)
{
WARN_ON(!list_empty(&device->post_commit_list));
rcu_string_free(device->name);
+ extent_io_tree_release(&device->alloc_state);
btrfs_destroy_dev_zone_info(device);
kfree(device);
}
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index a9b32ba6b2ce..d51057608fc3 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1168,12 +1168,12 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
return -ERANGE;
/* All the zones are conventional */
- if (find_next_bit(zinfo->seq_zones, begin, end) == end)
+ if (find_next_bit(zinfo->seq_zones, end, begin) == end)
return 0;
/* All the zones are sequential and empty */
- if (find_next_zero_bit(zinfo->seq_zones, begin, end) == end &&
- find_next_zero_bit(zinfo->empty_zones, begin, end) == end)
+ if (find_next_zero_bit(zinfo->seq_zones, end, begin) == end &&
+ find_next_zero_bit(zinfo->empty_zones, end, begin) == end)
return 0;
for (pos = start; pos < start + size; pos += zinfo->zone_size) {
diff --git a/fs/buffer.c b/fs/buffer.c
index a7fc561758b1..00cad2658a07 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -592,6 +592,76 @@ int sync_mapping_buffers(struct address_space *mapping)
}
EXPORT_SYMBOL(sync_mapping_buffers);
+/**
+ * generic_buffers_fsync_noflush - generic buffer fsync implementation
+ * for simple filesystems with no inode lock
+ *
+ * @file: file to synchronize
+ * @start: start offset in bytes
+ * @end: end offset in bytes (inclusive)
+ * @datasync: only synchronize essential metadata if true
+ *
+ * This is a generic implementation of the fsync method for simple
+ * filesystems which track all non-inode metadata in the buffers list
+ * hanging off the address_space structure.
+ */
+int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
+ bool datasync)
+{
+ struct inode *inode = file->f_mapping->host;
+ int err;
+ int ret;
+
+ err = file_write_and_wait_range(file, start, end);
+ if (err)
+ return err;
+
+ ret = sync_mapping_buffers(inode->i_mapping);
+ if (!(inode->i_state & I_DIRTY_ALL))
+ goto out;
+ if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+ goto out;
+
+ err = sync_inode_metadata(inode, 1);
+ if (ret == 0)
+ ret = err;
+
+out:
+ /* check and advance again to catch errors after syncing out buffers */
+ err = file_check_and_advance_wb_err(file);
+ if (ret == 0)
+ ret = err;
+ return ret;
+}
+EXPORT_SYMBOL(generic_buffers_fsync_noflush);
+
+/**
+ * generic_buffers_fsync - generic buffer fsync implementation
+ * for simple filesystems with no inode lock
+ *
+ * @file: file to synchronize
+ * @start: start offset in bytes
+ * @end: end offset in bytes (inclusive)
+ * @datasync: only synchronize essential metadata if true
+ *
+ * This is a generic implementation of the fsync method for simple
+ * filesystems which track all non-inode metadata in the buffers list
+ * hanging off the address_space structure. This also makes sure that
+ * a device cache flush operation is called at the end.
+ */
+int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
+ bool datasync)
+{
+ struct inode *inode = file->f_mapping->host;
+ int ret;
+
+ ret = generic_buffers_fsync_noflush(file, start, end, datasync);
+ if (!ret)
+ ret = blkdev_issue_flush(inode->i_sb->s_bdev);
+ return ret;
+}
+EXPORT_SYMBOL(generic_buffers_fsync);
+
/*
* Called when we've recently written block `bblock', and it is known that
* `bblock' was for a buffer_boundary() buffer. This means that the block at
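
The two helpers added above are meant to be dropped straight into a simple filesystem's file_operations; the ext2 hunks later in this series do exactly that. A hedged sketch of the intended wiring (the "myfs" names are placeholders):

    #include <linux/buffer_head.h>
    #include <linux/fs.h>

    /* Flushing variant: also issues a device cache flush at the end. */
    static int myfs_fsync(struct file *file, loff_t start, loff_t end,
                          int datasync)
    {
            return generic_buffers_fsync(file, start, end, datasync);
    }

    static const struct file_operations myfs_file_ops = {
            .fsync = myfs_fsync,
            /* ... */
    };
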
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 32f7c81a7b89..81430abacf93 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -246,7 +246,7 @@ cifs_read_super(struct super_block *sb)
if (cifs_sb->ctx->rasize)
sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
else
- sb->s_bdi->ra_pages = cifs_sb->ctx->rsize / PAGE_SIZE;
+ sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
sb->s_blocksize = CIFS_MAX_MSGSIZE;
sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index a81758225fcd..a295e4c2d54e 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1682,7 +1682,7 @@ smb2_copychunk_range(const unsigned int xid,
pcchunk->SourceOffset = cpu_to_le64(src_off);
pcchunk->TargetOffset = cpu_to_le64(dest_off);
pcchunk->Length =
- cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
+ cpu_to_le32(min_t(u64, len, tcon->max_bytes_chunk));
/* Request server copy to target from src identified by key */
kfree(retbuf);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index e33ca0d33906..9ed61b6f9b21 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1947,6 +1947,9 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
init_copy_chunk_defaults(tcon);
if (server->ops->validate_negotiate)
rc = server->ops->validate_negotiate(xid, tcon);
+ /* See MS-SMB2 2.2.10 and 3.2.5.5 */
+ if (rc == 0 && (tcon->share_flags & SMB2_SHAREFLAG_ISOLATED_TRANSPORT))
+ server->nosharesock = true;
tcon_exit:
free_rsp_buf(resp_buftype, rsp);
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index c16f0d660cb7..044863c0d824 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -441,10 +441,10 @@ int ecryptfs_encrypt_page(struct page *page)
}
lower_offset = lower_offset_for_page(crypt_stat, page);
- enc_extent_virt = kmap(enc_extent_page);
+ enc_extent_virt = kmap_local_page(enc_extent_page);
rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt, lower_offset,
PAGE_SIZE);
- kunmap(enc_extent_page);
+ kunmap_local(enc_extent_virt);
if (rc < 0) {
ecryptfs_printk(KERN_ERR,
"Error attempting to write lower page; rc = [%d]\n",
@@ -490,10 +490,10 @@ int ecryptfs_decrypt_page(struct page *page)
BUG_ON(!(crypt_stat->flags & ECRYPTFS_ENCRYPTED));
lower_offset = lower_offset_for_page(crypt_stat, page);
- page_virt = kmap(page);
+ page_virt = kmap_local_page(page);
rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_SIZE,
ecryptfs_inode);
- kunmap(page);
+ kunmap_local(page_virt);
if (rc < 0) {
ecryptfs_printk(KERN_ERR,
"Error attempting to read lower page; rc = [%d]\n",
@@ -1289,7 +1289,7 @@ out:
/**
* ecryptfs_read_xattr_region
- * @page_virt: The vitual address into which to read the xattr data
+ * @page_virt: The virtual address into which to read the xattr data
* @ecryptfs_inode: The eCryptfs inode
*
* Attempts to read the crypto metadata from the extended attribute
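
The kmap conversions above follow the kmap_local_page() pairing rule: the unmap takes the virtual address returned by the map, not the page, and nested mappings must be released in reverse order. A hedged kernel-only sketch of the pattern (the helper is ours):

    #include <linux/highmem.h>
    #include <linux/string.h>

    static void copy_from_page(struct page *page, void *dst, size_t len)
    {
            void *virt = kmap_local_page(page);    /* short-lived mapping */

            memcpy(dst, virt, len);
            kunmap_local(virt);    /* unmap by address, not by page */
    }
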
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 3fe41964c0d8..2452d6fd7062 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -878,7 +878,7 @@ struct ecryptfs_parse_tag_70_packet_silly_stack {
* @filename: This function kmalloc's the memory for the filename
* @filename_size: This function sets this to the amount of memory
* kmalloc'd for the filename
- * @packet_size: This function sets this to the the number of octets
+ * @packet_size: This function sets this to the number of octets
* in the packet parsed
* @mount_crypt_stat: The mount-wide cryptographic context
* @data: The memory location containing the start of the tag 70
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index 60bdcaddcbe5..5edf027c8359 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -64,11 +64,11 @@ int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
offset = ((((loff_t)page_for_lower->index) << PAGE_SHIFT)
+ offset_in_page);
- virt = kmap(page_for_lower);
+ virt = kmap_local_page(page_for_lower);
rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size);
if (rc > 0)
rc = 0;
- kunmap(page_for_lower);
+ kunmap_local(virt);
return rc;
}
@@ -253,11 +253,11 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
int rc;
offset = ((((loff_t)page_index) << PAGE_SHIFT) + offset_in_page);
- virt = kmap(page_for_ecryptfs);
+ virt = kmap_local_page(page_for_ecryptfs);
rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode);
if (rc > 0)
rc = 0;
- kunmap(page_for_ecryptfs);
+ kunmap_local(virt);
flush_dcache_page(page_for_ecryptfs);
return rc;
}
diff --git a/fs/ext2/Makefile b/fs/ext2/Makefile
index 311479d864a7..8860948ef9ca 100644
--- a/fs/ext2/Makefile
+++ b/fs/ext2/Makefile
@@ -6,7 +6,10 @@
obj-$(CONFIG_EXT2_FS) += ext2.o
ext2-y := balloc.o dir.o file.o ialloc.o inode.o \
- ioctl.o namei.o super.o symlink.o
+ ioctl.o namei.o super.o symlink.o trace.o
+
+# For tracepoints to include our trace.h from tracepoint infrastructure
+CFLAGS_trace.o := -I$(src)
ext2-$(CONFIG_EXT2_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
ext2-$(CONFIG_EXT2_FS_POSIX_ACL) += acl.o
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 8244366862e4..d0531d4ef499 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -754,6 +754,7 @@ extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
extern struct inode *ext2_iget (struct super_block *, unsigned long);
extern int ext2_write_inode (struct inode *, struct writeback_control *);
extern void ext2_evict_inode(struct inode *);
+void ext2_write_failed(struct address_space *mapping, loff_t to);
extern int ext2_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern int ext2_setattr (struct mnt_idmap *, struct dentry *, struct iattr *);
extern int ext2_getattr (struct mnt_idmap *, const struct path *,
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 6b4bebe982ca..7a32f202908e 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -25,9 +25,11 @@
#include <linux/quotaops.h>
#include <linux/iomap.h>
#include <linux/uio.h>
+#include <linux/buffer_head.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
+#include "trace.h"
#ifdef CONFIG_FS_DAX
static ssize_t ext2_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
@@ -153,7 +155,7 @@ int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
int ret;
struct super_block *sb = file->f_mapping->host->i_sb;
- ret = generic_file_fsync(file, start, end, datasync);
+ ret = generic_buffers_fsync(file, start, end, datasync);
if (ret == -EIO)
/* We don't really know where the IO error happened... */
ext2_error(sb, __func__,
@@ -161,12 +163,131 @@ int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
return ret;
}
+static ssize_t ext2_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+ ssize_t ret;
+
+ trace_ext2_dio_read_begin(iocb, to, 0);
+ inode_lock_shared(inode);
+ ret = iomap_dio_rw(iocb, to, &ext2_iomap_ops, NULL, 0, NULL, 0);
+ inode_unlock_shared(inode);
+ trace_ext2_dio_read_end(iocb, to, ret);
+
+ return ret;
+}
+
+static int ext2_dio_write_end_io(struct kiocb *iocb, ssize_t size,
+ int error, unsigned int flags)
+{
+ loff_t pos = iocb->ki_pos;
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ if (error)
+ goto out;
+
+ /*
+ * If we are extending the file, we have to update i_size here before
+ * page cache gets invalidated in iomap_dio_rw(). This prevents racing
+ * buffered reads from zeroing out too much from page cache pages.
+ * Note that all extending writes always happen synchronously with
+ * inode lock held by ext2_dio_write_iter(). So it is safe to update
+ * inode size here for extending file writes.
+ */
+ pos += size;
+ if (pos > i_size_read(inode)) {
+ i_size_write(inode, pos);
+ mark_inode_dirty(inode);
+ }
+out:
+ trace_ext2_dio_write_endio(iocb, size, error);
+ return error;
+}
+
+static const struct iomap_dio_ops ext2_dio_write_ops = {
+ .end_io = ext2_dio_write_end_io,
+};
+
+static ssize_t ext2_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+ ssize_t ret;
+ unsigned int flags = 0;
+ unsigned long blocksize = inode->i_sb->s_blocksize;
+ loff_t offset = iocb->ki_pos;
+ loff_t count = iov_iter_count(from);
+ ssize_t status = 0;
+
+ trace_ext2_dio_write_begin(iocb, from, 0);
+ inode_lock(inode);
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0)
+ goto out_unlock;
+
+ ret = kiocb_modified(iocb);
+ if (ret)
+ goto out_unlock;
+
+ /* use IOMAP_DIO_FORCE_WAIT for unaligned or extending writes */
+ if (iocb->ki_pos + iov_iter_count(from) > i_size_read(inode) ||
+ (!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(from), blocksize)))
+ flags |= IOMAP_DIO_FORCE_WAIT;
+
+ ret = iomap_dio_rw(iocb, from, &ext2_iomap_ops, &ext2_dio_write_ops,
+ flags, NULL, 0);
+
+ /* -ENOTBLK is the magic return value for falling back to buffered I/O */
+ if (ret == -ENOTBLK)
+ ret = 0;
+
+ if (ret < 0 && ret != -EIOCBQUEUED)
+ ext2_write_failed(inode->i_mapping, offset + count);
+
+ /* handle partial writes and the fallback to buffered write */
+ if (ret >= 0 && iov_iter_count(from)) {
+ loff_t pos, endbyte;
+ int ret2;
+
+ iocb->ki_flags &= ~IOCB_DIRECT;
+ pos = iocb->ki_pos;
+ status = generic_perform_write(iocb, from);
+ if (unlikely(status < 0)) {
+ ret = status;
+ goto out_unlock;
+ }
+
+ iocb->ki_pos += status;
+ ret += status;
+ endbyte = pos + status - 1;
+ ret2 = filemap_write_and_wait_range(inode->i_mapping, pos,
+ endbyte);
+ if (!ret2)
+ invalidate_mapping_pages(inode->i_mapping,
+ pos >> PAGE_SHIFT,
+ endbyte >> PAGE_SHIFT);
+ if (ret > 0)
+ generic_write_sync(iocb, ret);
+ }
+
+out_unlock:
+ inode_unlock(inode);
+ if (status)
+ trace_ext2_dio_write_buff_end(iocb, from, status);
+ trace_ext2_dio_write_end(iocb, from, ret);
+ return ret;
+}
+
static ssize_t ext2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
#ifdef CONFIG_FS_DAX
if (IS_DAX(iocb->ki_filp->f_mapping->host))
return ext2_dax_read_iter(iocb, to);
#endif
+ if (iocb->ki_flags & IOCB_DIRECT)
+ return ext2_dio_read_iter(iocb, to);
+
return generic_file_read_iter(iocb, to);
}
@@ -176,6 +297,9 @@ static ssize_t ext2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (IS_DAX(iocb->ki_filp->f_mapping->host))
return ext2_dax_write_iter(iocb, from);
#endif
+ if (iocb->ki_flags & IOCB_DIRECT)
+ return ext2_dio_write_iter(iocb, from);
+
return generic_file_write_iter(iocb, from);
}
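
The IOMAP_DIO_FORCE_WAIT decision above boils down to one predicate: extending writes and writes not aligned to the block size must run synchronously. A standalone model of that check (a simplification: the kernel tests iov_iter_alignment() rather than the raw length):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool must_force_wait(uint64_t pos, uint64_t len, uint64_t isize,
                                uint64_t blocksize)
    {
            return pos + len > isize || ((pos | len) & (blocksize - 1));
    }

    int main(void)
    {
            assert(must_force_wait(4096, 512, 8192, 4096));     /* unaligned */
            assert(must_force_wait(8192, 4096, 8192, 4096));    /* extending */
            assert(!must_force_wait(0, 4096, 8192, 4096));      /* overwrite */
            return 0;
    }
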
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 26f135e7ffce..75983215c7a1 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -56,7 +56,7 @@ static inline int ext2_inode_is_fast_symlink(struct inode *inode)
static void ext2_truncate_blocks(struct inode *inode, loff_t offset);
-static void ext2_write_failed(struct address_space *mapping, loff_t to)
+void ext2_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
@@ -809,9 +809,27 @@ static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
bool new = false, boundary = false;
u32 bno;
int ret;
+ bool create = flags & IOMAP_WRITE;
+
+ /*
+ * For writes that could fill holes inside i_size on a
+ * DIO_SKIP_HOLES filesystem we forbid block creations: only
+ * overwrites are permitted.
+ */
+ if ((flags & IOMAP_DIRECT) &&
+ (first_block << blkbits) < i_size_read(inode))
+ create = 0;
+
+ /*
+ * Writes that span EOF might trigger an IO size update on completion,
+ * so consider them to be dirty for the purposes of O_DSYNC even if
+ * no other metadata changes are pending or have been made here.
+ */
+ if ((flags & IOMAP_WRITE) && offset + length > i_size_read(inode))
+ iomap->flags |= IOMAP_F_DIRTY;
ret = ext2_get_blocks(inode, first_block, max_blocks,
- &bno, &new, &boundary, flags & IOMAP_WRITE);
+ &bno, &new, &boundary, create);
if (ret < 0)
return ret;
@@ -823,6 +841,12 @@ static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
iomap->bdev = inode->i_sb->s_bdev;
if (ret == 0) {
+ /*
+ * Switch to buffered-io for writing to holes in a non-extent
+ * based filesystem to avoid stale data exposure problem.
+ */
+ if (!create && (flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT))
+ return -ENOTBLK;
iomap->type = IOMAP_HOLE;
iomap->addr = IOMAP_NULL_ADDR;
iomap->length = 1 << blkbits;
@@ -844,6 +868,13 @@ static int
ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length,
ssize_t written, unsigned flags, struct iomap *iomap)
{
+ /*
+ * Switch to buffered-io in case of any error.
+ * Blocks allocated can be used by the buffered-io path.
+ */
+ if ((flags & IOMAP_DIRECT) && (flags & IOMAP_WRITE) && written == 0)
+ return -ENOTBLK;
+
if (iomap->type == IOMAP_MAPPED &&
written < length &&
(flags & IOMAP_WRITE))
@@ -908,22 +939,6 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping,block,ext2_get_block);
}
-static ssize_t
-ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
- size_t count = iov_iter_count(iter);
- loff_t offset = iocb->ki_pos;
- ssize_t ret;
-
- ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block);
- if (ret < 0 && iov_iter_rw(iter) == WRITE)
- ext2_write_failed(mapping, offset + count);
- return ret;
-}
-
static int
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
@@ -946,7 +961,7 @@ const struct address_space_operations ext2_aops = {
.write_begin = ext2_write_begin,
.write_end = ext2_write_end,
.bmap = ext2_bmap,
- .direct_IO = ext2_direct_IO,
+ .direct_IO = noop_direct_IO,
.writepages = ext2_writepages,
.migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
@@ -1259,9 +1274,8 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
inode_dio_wait(inode);
if (IS_DAX(inode))
- error = dax_zero_range(inode, newsize,
- PAGE_ALIGN(newsize) - newsize, NULL,
- &ext2_iomap_ops);
+ error = dax_truncate_page(inode, newsize, NULL,
+ &ext2_iomap_ops);
else
error = block_truncate_page(inode->i_mapping,
newsize, ext2_get_block);
diff --git a/fs/ext2/trace.c b/fs/ext2/trace.c
new file mode 100644
index 000000000000..b01cdf6526fd
--- /dev/null
+++ b/fs/ext2/trace.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "ext2.h"
+#include <linux/uio.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/fs/ext2/trace.h b/fs/ext2/trace.h
new file mode 100644
index 000000000000..7d230e13576e
--- /dev/null
+++ b/fs/ext2/trace.h
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ext2
+
+#if !defined(_EXT2_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _EXT2_TRACE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(ext2_dio_class,
+ TP_PROTO(struct kiocb *iocb, struct iov_iter *iter, ssize_t ret),
+ TP_ARGS(iocb, iter, ret),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(loff_t, isize)
+ __field(loff_t, pos)
+ __field(size_t, count)
+ __field(int, ki_flags)
+ __field(bool, aio)
+ __field(ssize_t, ret)
+ ),
+ TP_fast_assign(
+ __entry->dev = file_inode(iocb->ki_filp)->i_sb->s_dev;
+ __entry->ino = file_inode(iocb->ki_filp)->i_ino;
+ __entry->isize = file_inode(iocb->ki_filp)->i_size;
+ __entry->pos = iocb->ki_pos;
+ __entry->count = iov_iter_count(iter);
+ __entry->ki_flags = iocb->ki_flags;
+ __entry->aio = !is_sync_kiocb(iocb);
+ __entry->ret = ret;
+ ),
+ TP_printk("dev %d:%d ino 0x%lx isize 0x%llx pos 0x%llx len %zu flags %s aio %d ret %zd",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->isize,
+ __entry->pos,
+ __entry->count,
+ __print_flags(__entry->ki_flags, "|", TRACE_IOCB_STRINGS),
+ __entry->aio,
+ __entry->ret)
+);
+
+#define DEFINE_DIO_RW_EVENT(name) \
+DEFINE_EVENT(ext2_dio_class, name, \
+ TP_PROTO(struct kiocb *iocb, struct iov_iter *iter, ssize_t ret), \
+ TP_ARGS(iocb, iter, ret))
+DEFINE_DIO_RW_EVENT(ext2_dio_write_begin);
+DEFINE_DIO_RW_EVENT(ext2_dio_write_end);
+DEFINE_DIO_RW_EVENT(ext2_dio_write_buff_end);
+DEFINE_DIO_RW_EVENT(ext2_dio_read_begin);
+DEFINE_DIO_RW_EVENT(ext2_dio_read_end);
+
+TRACE_EVENT(ext2_dio_write_endio,
+ TP_PROTO(struct kiocb *iocb, ssize_t size, int ret),
+ TP_ARGS(iocb, size, ret),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(loff_t, isize)
+ __field(loff_t, pos)
+ __field(ssize_t, size)
+ __field(int, ki_flags)
+ __field(bool, aio)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __entry->dev = file_inode(iocb->ki_filp)->i_sb->s_dev;
+ __entry->ino = file_inode(iocb->ki_filp)->i_ino;
+ __entry->isize = file_inode(iocb->ki_filp)->i_size;
+ __entry->pos = iocb->ki_pos;
+ __entry->size = size;
+ __entry->ki_flags = iocb->ki_flags;
+ __entry->aio = !is_sync_kiocb(iocb);
+ __entry->ret = ret;
+ ),
+ TP_printk("dev %d:%d ino 0x%lx isize 0x%llx pos 0x%llx len %zd flags %s aio %d ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->isize,
+ __entry->pos,
+ __entry->size,
+ __print_flags(__entry->ki_flags, "|", TRACE_IOCB_STRINGS),
+ __entry->aio,
+ __entry->ret)
+);
+
+#endif /* _EXT2_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
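
The new trace.[ch] pair above follows the stock tracepoint recipe: exactly one translation unit defines CREATE_TRACE_POINTS before including the header, which makes <trace/define_trace.h> emit the event bodies there, and the -I$(src) flag in the Makefile lets the header find itself via TRACE_INCLUDE_PATH. Reduced to its skeleton:

    /* trace.c - the single defining TU */
    #define CREATE_TRACE_POINTS
    #include "trace.h"

    /* every other user just includes the header */
    #include "trace.h"
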
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 094269488183..c1edde817be8 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -305,6 +305,38 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
return desc;
}
+static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb,
+ ext4_group_t block_group,
+ struct buffer_head *bh)
+{
+ ext4_grpblk_t next_zero_bit;
+ unsigned long bitmap_size = sb->s_blocksize * 8;
+ unsigned int offset = num_clusters_in_group(sb, block_group);
+
+ if (bitmap_size <= offset)
+ return 0;
+
+ next_zero_bit = ext4_find_next_zero_bit(bh->b_data, bitmap_size, offset);
+
+ return (next_zero_bit < bitmap_size ? next_zero_bit : 0);
+}
+
+struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
+ ext4_group_t group)
+{
+ struct ext4_group_info **grp_info;
+ long indexv, indexh;
+
+ if (unlikely(group >= EXT4_SB(sb)->s_groups_count)) {
+ ext4_error(sb, "invalid group %u", group);
+ return NULL;
+ }
+ indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
+ indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
+ grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
+ return grp_info[indexh];
+}
+
/*
* Return the block number which was discovered to be invalid, or 0 if
* the block bitmap is valid.
@@ -379,7 +411,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
if (buffer_verified(bh))
return 0;
- if (EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+ if (!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
return -EFSCORRUPTED;
ext4_lock_group(sb, block_group);
@@ -402,6 +434,15 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
EXT4_GROUP_INFO_BBITMAP_CORRUPT);
return -EFSCORRUPTED;
}
+ blk = ext4_valid_block_bitmap_padding(sb, block_group, bh);
+ if (unlikely(blk != 0)) {
+ ext4_unlock_group(sb, block_group);
+ ext4_error(sb, "bg %u: block %llu: padding at end of block bitmap is not set",
+ block_group, blk);
+ ext4_mark_group_bitmap_corrupted(sb, block_group,
+ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+ return -EFSCORRUPTED;
+ }
set_buffer_verified(bh);
verified:
ext4_unlock_group(sb, block_group);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 18cb2680dc39..7e8f66ba17f4 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2625,6 +2625,8 @@ extern void ext4_check_blocks_bitmap(struct super_block *);
extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
ext4_group_t block_group,
struct buffer_head ** bh);
+extern struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
+ ext4_group_t group);
extern int ext4_should_retry_alloc(struct super_block *sb, int *retries);
extern struct buffer_head *ext4_read_block_bitmap_nowait(struct super_block *sb,
@@ -3232,19 +3234,6 @@ static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
raw_inode->i_size_high = cpu_to_le32(i_size >> 32);
}
-static inline
-struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
- ext4_group_t group)
-{
- struct ext4_group_info **grp_info;
- long indexv, indexh;
- BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
- indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
- indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
- grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
- return grp_info[indexh];
-}
-
/*
* Reading s_groups_count requires using smp_rmb() afterwards. See
* the locking protocol documented in the comments of ext4_group_add()
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 7bc221038c6c..595abb9e7d74 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -267,14 +267,12 @@ static void __es_find_extent_range(struct inode *inode,
/* see if the extent has been cached */
es->es_lblk = es->es_len = es->es_pblk = 0;
- if (tree->cache_es) {
- es1 = tree->cache_es;
- if (in_range(lblk, es1->es_lblk, es1->es_len)) {
- es_debug("%u cached by [%u/%u) %llu %x\n",
- lblk, es1->es_lblk, es1->es_len,
- ext4_es_pblock(es1), ext4_es_status(es1));
- goto out;
- }
+ es1 = READ_ONCE(tree->cache_es);
+ if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
+ es_debug("%u cached by [%u/%u) %llu %x\n",
+ lblk, es1->es_lblk, es1->es_len,
+ ext4_es_pblock(es1), ext4_es_status(es1));
+ goto out;
}
es1 = __es_tree_search(&tree->root, lblk);
@@ -293,7 +291,7 @@ out:
}
if (es1 && matching_fn(es1)) {
- tree->cache_es = es1;
+ WRITE_ONCE(tree->cache_es, es1);
es->es_lblk = es1->es_lblk;
es->es_len = es1->es_len;
es->es_pblk = es1->es_pblk;
@@ -931,14 +929,12 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
/* find extent in cache firstly */
es->es_lblk = es->es_len = es->es_pblk = 0;
- if (tree->cache_es) {
- es1 = tree->cache_es;
- if (in_range(lblk, es1->es_lblk, es1->es_len)) {
- es_debug("%u cached by [%u/%u)\n",
- lblk, es1->es_lblk, es1->es_len);
- found = 1;
- goto out;
- }
+ es1 = READ_ONCE(tree->cache_es);
+ if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
+ es_debug("%u cached by [%u/%u)\n",
+ lblk, es1->es_lblk, es1->es_len);
+ found = 1;
+ goto out;
}
node = tree->root.rb_node;
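
The READ_ONCE()/WRITE_ONCE() conversion above makes the lockless cache_es fast path safe: the racy pointer is loaded exactly once into a local, and only that snapshot is validated and used, so a concurrent writer cannot swap it mid-check. A hedged kernel-style sketch of the pattern (types are simplified stand-ins):

    #include <linux/compiler.h>

    struct es_stub { unsigned int lblk, len; };
    struct tree_stub { struct es_stub *cache_es; };

    static struct es_stub *cache_lookup(struct tree_stub *t, unsigned int lblk)
    {
            struct es_stub *es1 = READ_ONCE(t->cache_es);    /* single load */

            /* unsigned trick mimicking in_range(lblk, es1->lblk, es1->len) */
            if (es1 && lblk - es1->lblk < es1->len)
                    return es1;
            return NULL;
    }
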
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index f65fdb27ce14..9cd71d76622d 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -28,6 +28,7 @@
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
@@ -78,21 +79,13 @@ static int ext4_sync_parent(struct inode *inode)
return ret;
}
-static int ext4_fsync_nojournal(struct inode *inode, bool datasync,
- bool *needs_barrier)
+static int ext4_fsync_nojournal(struct file *file, loff_t start, loff_t end,
+ int datasync, bool *needs_barrier)
{
- int ret, err;
-
- ret = sync_mapping_buffers(inode->i_mapping);
- if (!(inode->i_state & I_DIRTY_ALL))
- return ret;
- if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
- return ret;
-
- err = sync_inode_metadata(inode, 1);
- if (!ret)
- ret = err;
+ struct inode *inode = file->f_inode;
+ int ret;
+ ret = generic_buffers_fsync_noflush(file, start, end, datasync);
if (!ret)
ret = ext4_sync_parent(inode);
if (test_opt(inode->i_sb, BARRIER))
@@ -148,6 +141,14 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
goto out;
}
+ if (!sbi->s_journal) {
+ ret = ext4_fsync_nojournal(file, start, end, datasync,
+ &needs_barrier);
+ if (needs_barrier)
+ goto issue_flush;
+ goto out;
+ }
+
ret = file_write_and_wait_range(file, start, end);
if (ret)
goto out;
@@ -157,11 +158,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* Metadata is in the journal, we wait for proper transaction to
* commit here.
*/
- if (!sbi->s_journal)
- ret = ext4_fsync_nojournal(inode, datasync, &needs_barrier);
- else
- ret = ext4_fsync_journal(inode, datasync, &needs_barrier);
+ ret = ext4_fsync_journal(inode, datasync, &needs_barrier);
+issue_flush:
if (needs_barrier) {
err = blkdev_issue_flush(inode->i_sb->s_bdev);
if (!ret)
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
index 147b5241dd94..e43b32ef9380 100644
--- a/fs/ext4/hash.c
+++ b/fs/ext4/hash.c
@@ -277,7 +277,12 @@ static int __ext4fs_dirhash(const struct inode *dir, const char *name, int len,
}
default:
hinfo->hash = 0;
- return -1;
+ hinfo->minor_hash = 0;
+ ext4_warning(dir->i_sb,
+ "invalid/unsupported hash tree version %u",
+ hinfo->hash_version);
+ WARN_ON_ONCE(1);
+ return -EINVAL;
}
hash = hash & ~1;
if (hash == (EXT4_HTREE_EOF_32BIT << 1))
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 787ab89c2c26..754f961cd9fd 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -91,7 +91,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
if (buffer_verified(bh))
return 0;
- if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
+ if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
return -EFSCORRUPTED;
ext4_lock_group(sb, block_group);
@@ -293,7 +293,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
}
if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
grp = ext4_get_group_info(sb, block_group);
- if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
+ if (!grp || unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
fatal = -EFSCORRUPTED;
goto error_return;
}
@@ -1046,7 +1046,7 @@ got_group:
* Skip groups with already-known suspicious inode
* tables
*/
- if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
+ if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
goto next_group;
}
@@ -1183,6 +1183,10 @@ got:
if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
grp = ext4_get_group_info(sb, group);
+ if (!grp) {
+ err = -EFSCORRUPTED;
+ goto out;
+ }
down_read(&grp->alloc_sem); /*
* protect vs itable
* lazyinit
@@ -1526,7 +1530,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
}
gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
- if (!gdp)
+ if (!gdp || !grp)
goto out;
/*
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 859bc4e2c9b0..d3dfc51a43c5 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1175,6 +1175,7 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
ext4_initialize_dirent_tail(dir_block,
inode->i_sb->s_blocksize);
set_buffer_uptodate(dir_block);
+ unlock_buffer(dir_block);
err = ext4_handle_dirty_dirblock(handle, inode, dir_block);
if (err)
return err;
@@ -1249,6 +1250,7 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
if (!S_ISDIR(inode->i_mode)) {
memcpy(data_bh->b_data, buf, inline_size);
set_buffer_uptodate(data_bh);
+ unlock_buffer(data_bh);
error = ext4_handle_dirty_metadata(handle,
inode, data_bh);
} else {
@@ -1256,7 +1258,6 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
buf, inline_size);
}
- unlock_buffer(data_bh);
out_restore:
if (error)
ext4_restore_inline_data(handle, inode, iloc, buf, inline_size);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 0d5ba922e411..19c884abe52b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3375,7 +3375,7 @@ static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
*/
flags &= ~IOMAP_WRITE;
ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
- WARN_ON_ONCE(iomap->type != IOMAP_MAPPED);
+ WARN_ON_ONCE(!ret && iomap->type != IOMAP_MAPPED);
return ret;
}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 78259bddbc4d..9c7881a4ea75 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -745,6 +745,8 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
grp = ext4_get_group_info(sb, e4b->bd_group);
+ if (!grp)
+ return -EFSCORRUPTED;
list_for_each(cur, &grp->bb_prealloc_list) {
ext4_group_t groupnr;
struct ext4_prealloc_space *pa;
@@ -1060,9 +1062,9 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
- void *buddy, void *bitmap, ext4_group_t group)
+ void *buddy, void *bitmap, ext4_group_t group,
+ struct ext4_group_info *grp)
{
- struct ext4_group_info *grp = ext4_get_group_info(sb, group);
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
ext4_grpblk_t i = 0;
@@ -1181,6 +1183,8 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
break;
grinfo = ext4_get_group_info(sb, group);
+ if (!grinfo)
+ continue;
/*
* If page is uptodate then we came here after online resize
* which added some new uninitialized group info structs, so
@@ -1246,6 +1250,10 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
group, page->index, i * blocksize);
trace_ext4_mb_buddy_bitmap_load(sb, group);
grinfo = ext4_get_group_info(sb, group);
+ if (!grinfo) {
+ err = -EFSCORRUPTED;
+ goto out;
+ }
grinfo->bb_fragments = 0;
memset(grinfo->bb_counters, 0,
sizeof(*grinfo->bb_counters) *
@@ -1256,7 +1264,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
ext4_lock_group(sb, group);
/* init the buddy */
memset(data, 0xff, blocksize);
- ext4_mb_generate_buddy(sb, data, incore, group);
+ ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
ext4_unlock_group(sb, group);
incore = NULL;
} else {
@@ -1370,6 +1378,9 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
might_sleep();
mb_debug(sb, "init group %u\n", group);
this_grp = ext4_get_group_info(sb, group);
+ if (!this_grp)
+ return -EFSCORRUPTED;
+
/*
* This ensures that we don't reinit the buddy cache
* page which map to the group from which we are already
@@ -1444,6 +1455,8 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
blocks_per_page = PAGE_SIZE / sb->s_blocksize;
grp = ext4_get_group_info(sb, group);
+ if (!grp)
+ return -EFSCORRUPTED;
e4b->bd_blkbits = sb->s_blocksize_bits;
e4b->bd_info = grp;
@@ -2159,6 +2172,8 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
struct ext4_free_extent ex;
+ if (!grp)
+ return -EFSCORRUPTED;
if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
return 0;
if (grp->bb_free == 0)
@@ -2385,7 +2400,7 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
BUG_ON(cr < 0 || cr >= 4);
- if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
+ if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
return false;
free = grp->bb_free;
@@ -2454,6 +2469,8 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
ext4_grpblk_t free;
int ret = 0;
+ if (!grp)
+ return -EFSCORRUPTED;
if (sbi->s_mb_stats)
atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
if (should_lock) {
@@ -2534,7 +2551,7 @@ ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
* prefetch once, so we avoid getblk() call, which can
* be expensive.
*/
- if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
+ if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
EXT4_MB_GRP_NEED_INIT(grp) &&
ext4_free_group_clusters(sb, gdp) > 0 &&
!(ext4_has_group_desc_csum(sb) &&
@@ -2578,7 +2595,7 @@ void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
gdp = ext4_get_group_desc(sb, group, NULL);
grp = ext4_get_group_info(sb, group);
- if (EXT4_MB_GRP_NEED_INIT(grp) &&
+ if (gdp && grp && EXT4_MB_GRP_NEED_INIT(grp) &&
ext4_free_group_clusters(sb, gdp) > 0 &&
!(ext4_has_group_desc_csum(sb) &&
(gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
@@ -2837,6 +2854,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
sizeof(struct ext4_group_info);
grinfo = ext4_get_group_info(sb, group);
+ if (!grinfo)
+ return 0;
/* Load the group info in memory only if not already loaded. */
if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
err = ext4_mb_load_buddy(sb, group, &e4b);
@@ -2847,7 +2866,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
buddy_loaded = 1;
}
- memcpy(&sg, ext4_get_group_info(sb, group), i);
+ memcpy(&sg, grinfo, i);
if (buddy_loaded)
ext4_mb_unload_buddy(&e4b);
@@ -3208,8 +3227,12 @@ static int ext4_mb_init_backend(struct super_block *sb)
err_freebuddy:
cachep = get_groupinfo_cache(sb->s_blocksize_bits);
- while (i-- > 0)
- kmem_cache_free(cachep, ext4_get_group_info(sb, i));
+ while (i-- > 0) {
+ struct ext4_group_info *grp = ext4_get_group_info(sb, i);
+
+ if (grp)
+ kmem_cache_free(cachep, grp);
+ }
i = sbi->s_group_info_size;
rcu_read_lock();
group_info = rcu_dereference(sbi->s_group_info);
@@ -3522,6 +3545,8 @@ int ext4_mb_release(struct super_block *sb)
for (i = 0; i < ngroups; i++) {
cond_resched();
grinfo = ext4_get_group_info(sb, i);
+ if (!grinfo)
+ continue;
mb_group_bb_bitmap_free(grinfo);
ext4_lock_group(sb, i);
count = ext4_mb_cleanup_pa(grinfo);
@@ -4606,6 +4631,8 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
struct ext4_free_data *entry;
grp = ext4_get_group_info(sb, group);
+ if (!grp)
+ return;
n = rb_first(&(grp->bb_free_root));
while (n) {
@@ -4633,6 +4660,9 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
int preallocated = 0;
int len;
+ if (!grp)
+ return;
+
/* all form of preallocation discards first load group,
* so the only competing code is preallocation use.
* we don't need any locking here
@@ -4869,6 +4899,8 @@ adjust_bex:
ei = EXT4_I(ac->ac_inode);
grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+ if (!grp)
+ return;
pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock;
pa->pa_inode = ac->ac_inode;
@@ -4918,6 +4950,8 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
+ if (!grp)
+ return;
lg = ac->ac_lg;
BUG_ON(lg == NULL);
@@ -5013,7 +5047,11 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
trace_ext4_mb_release_group_pa(sb, pa);
BUG_ON(pa->pa_deleted == 0);
ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
- BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
+ if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
+ ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
+ e4b->bd_group, group, pa->pa_pstart);
+ return 0;
+ }
mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
@@ -5043,6 +5081,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
int err;
int free = 0;
+ if (!grp)
+ return 0;
mb_debug(sb, "discard preallocation for group %u\n", group);
if (list_empty(&grp->bb_prealloc_list))
goto out_dbg;
@@ -5222,6 +5262,14 @@ repeat:
list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
BUG_ON(pa->pa_type != MB_INODE_PA);
group = ext4_get_group_number(sb, pa->pa_pstart);
+ if (pa->pa_pstart > ext4_blocks_count(EXT4_SB(sb)->s_es) ||
+ group >= EXT4_SB(sb)->s_groups_count) {
+ ext4_warning(sb, "pa_pstart %llu invalid max %llu first_data_block %u group %u",
+ pa->pa_pstart,
+ ext4_blocks_count(EXT4_SB(sb)->s_es),
+ le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block),
+ group);
+ }
err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
GFP_NOFS|__GFP_NOFAIL);
@@ -5297,6 +5345,9 @@ static inline void ext4_mb_show_pa(struct super_block *sb)
struct ext4_prealloc_space *pa;
ext4_grpblk_t start;
struct list_head *cur;
+
+ if (!grp)
+ continue;
ext4_lock_group(sb, i);
list_for_each(cur, &grp->bb_prealloc_list) {
pa = list_entry(cur, struct ext4_prealloc_space,
@@ -6064,6 +6115,7 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
struct buffer_head *bitmap_bh = NULL;
struct super_block *sb = inode->i_sb;
struct ext4_group_desc *gdp;
+ struct ext4_group_info *grp;
unsigned int overflow;
ext4_grpblk_t bit;
struct buffer_head *gd_bh;
@@ -6089,8 +6141,8 @@ do_more:
overflow = 0;
ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
- if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
- ext4_get_group_info(sb, block_group))))
+ grp = ext4_get_group_info(sb, block_group);
+ if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
return;
/*
@@ -6692,6 +6744,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
for (group = first_group; group <= last_group; group++) {
grp = ext4_get_group_info(sb, group);
+ if (!grp)
+ continue;
/* We only do this if the grp has never been initialized */
if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
ret = ext4_mb_init_group(sb, group, GFP_NOFS);
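/*
 * A minimal userspace sketch (not kernel code, hypothetical names) of the
 * pattern the mballoc hunks above introduce: ext4_get_group_info() may now
 * return NULL, so callers test the pointer before any dereference, and the
 * NULL test must sit on the left of || so it short-circuits the access.
 */
#include <stdio.h>
#include <stddef.h>

struct group_info { unsigned long state; };
#define GRP_CORRUPT(g) ((g)->state & 1UL)	/* dereferences g */

static int group_usable(const struct group_info *grp)
{
	/* safe: !grp is evaluated first, GRP_CORRUPT(grp) is skipped */
	if (!grp || GRP_CORRUPT(grp))
		return 0;
	return 1;
}

int main(void)
{
	struct group_info ok = { 0 }, bad = { 1 };

	printf("%d %d %d\n", group_usable(&ok),		/* 1 */
	       group_usable(&bad),			/* 0 */
	       group_usable(NULL));			/* 0, no crash */
	return 0;
}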
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 4022bc713421..0aaf38ffcb6e 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -39,28 +39,36 @@ static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
* Write the MMP block using REQ_SYNC to try to get the block on-disk
* faster.
*/
-static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
+static int write_mmp_block_thawed(struct super_block *sb,
+ struct buffer_head *bh)
{
struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);
- /*
- * We protect against freezing so that we don't create dirty buffers
- * on frozen filesystem.
- */
- sb_start_write(sb);
ext4_mmp_csum_set(sb, mmp);
lock_buffer(bh);
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
submit_bh(REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO, bh);
wait_on_buffer(bh);
- sb_end_write(sb);
if (unlikely(!buffer_uptodate(bh)))
return -EIO;
-
return 0;
}
+static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
+{
+ int err;
+
+ /*
+ * We protect against freezing so that we don't create dirty buffers
+ * on frozen filesystem.
+ */
+ sb_start_write(sb);
+ err = write_mmp_block_thawed(sb, bh);
+ sb_end_write(sb);
+ return err;
+}
+
/*
* Read the MMP block. It _must_ be read from disk and hence we clear the
* uptodate flag on the buffer.
@@ -344,7 +352,11 @@ skip:
seq = mmp_new_seq();
mmp->mmp_seq = cpu_to_le32(seq);
- retval = write_mmp_block(sb, bh);
+ /*
+ * On mount / remount we are protected against fs freezing (by s_umount
+ * semaphore) and grabbing freeze protection upsets lockdep
+ */
+ retval = write_mmp_block_thawed(sb, bh);
if (retval)
goto failed;
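/*
 * A minimal userspace sketch (not kernel code) of the split above: a raw
 * "_thawed"-style helper assumes the caller already excludes freezing,
 * and a wrapper takes the protection for everyone else. The mutex here
 * stands in for sb_start_write()/sb_end_write().
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t freeze_lock = PTHREAD_MUTEX_INITIALIZER;

/* caller must already hold, or otherwise exclude, freeze_lock */
static int write_block_thawed(int blk)
{
	printf("writing block %d\n", blk);
	return 0;
}

/* ordinary callers take the protection themselves */
static int write_block(int blk)
{
	int err;

	pthread_mutex_lock(&freeze_lock);
	err = write_block_thawed(blk);
	pthread_mutex_unlock(&freeze_lock);
	return err;
}

int main(void)
{
	write_block(1);			/* normal path */
	pthread_mutex_lock(&freeze_lock);
	write_block_thawed(2);		/* "mount" path: already protected */
	pthread_mutex_unlock(&freeze_lock);
	return 0;
}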
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index a5010b5b8a8c..45b579805c95 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -674,7 +674,7 @@ static struct stats dx_show_leaf(struct inode *dir,
len = de->name_len;
if (!IS_ENCRYPTED(dir)) {
/* Directory is not encrypted */
- ext4fs_dirhash(dir, de->name,
+ (void) ext4fs_dirhash(dir, de->name,
de->name_len, &h);
printk("%*.s:(U)%x.%u ", len,
name, h.hash,
@@ -709,8 +709,9 @@ static struct stats dx_show_leaf(struct inode *dir,
if (IS_CASEFOLDED(dir))
h.hash = EXT4_DIRENT_HASH(de);
else
- ext4fs_dirhash(dir, de->name,
- de->name_len, &h);
+ (void) ext4fs_dirhash(dir,
+ de->name,
+ de->name_len, &h);
printk("%*.s:(E)%x.%u ", len, name,
h.hash, (unsigned) ((char *) de
- base));
@@ -720,7 +721,8 @@ static struct stats dx_show_leaf(struct inode *dir,
#else
int len = de->name_len;
char *name = de->name;
- ext4fs_dirhash(dir, de->name, de->name_len, &h);
+ (void) ext4fs_dirhash(dir, de->name,
+ de->name_len, &h);
printk("%*.s:%x.%u ", len, name, h.hash,
(unsigned) ((char *) de - base));
#endif
@@ -849,8 +851,14 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
/* hash is already computed for encrypted casefolded directory */
if (fname && fname_name(fname) &&
- !(IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir)))
- ext4fs_dirhash(dir, fname_name(fname), fname_len(fname), hinfo);
+ !(IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir))) {
+ int ret = ext4fs_dirhash(dir, fname_name(fname),
+ fname_len(fname), hinfo);
+ if (ret < 0) {
+ ret_err = ERR_PTR(ret);
+ goto fail;
+ }
+ }
hash = hinfo->hash;
if (root->info.unused_flags & 1) {
@@ -1111,7 +1119,12 @@ static int htree_dirblock_to_tree(struct file *dir_file,
hinfo->minor_hash = 0;
}
} else {
- ext4fs_dirhash(dir, de->name, de->name_len, hinfo);
+ err = ext4fs_dirhash(dir, de->name,
+ de->name_len, hinfo);
+ if (err < 0) {
+ count = err;
+ goto errout;
+ }
}
if ((hinfo->hash < start_hash) ||
((hinfo->hash == start_hash) &&
@@ -1313,8 +1326,12 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh,
if (de->name_len && de->inode) {
if (ext4_hash_in_dirent(dir))
h.hash = EXT4_DIRENT_HASH(de);
- else
- ext4fs_dirhash(dir, de->name, de->name_len, &h);
+ else {
+ int err = ext4fs_dirhash(dir, de->name,
+ de->name_len, &h);
+ if (err < 0)
+ return err;
+ }
map_tail--;
map_tail->hash = h.hash;
map_tail->offs = ((char *) de - base)>>2;
@@ -1452,10 +1469,9 @@ int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname,
hinfo->hash_version = DX_HASH_SIPHASH;
hinfo->seed = NULL;
if (cf_name->name)
- ext4fs_dirhash(dir, cf_name->name, cf_name->len, hinfo);
+ return ext4fs_dirhash(dir, cf_name->name, cf_name->len, hinfo);
else
- ext4fs_dirhash(dir, iname->name, iname->len, hinfo);
- return 0;
+ return ext4fs_dirhash(dir, iname->name, iname->len, hinfo);
}
#endif
@@ -2298,10 +2314,15 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
fname->hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
/* casefolded encrypted hashes are computed on fname setup */
- if (!ext4_hash_in_dirent(dir))
- ext4fs_dirhash(dir, fname_name(fname),
- fname_len(fname), &fname->hinfo);
-
+ if (!ext4_hash_in_dirent(dir)) {
+ int err = ext4fs_dirhash(dir, fname_name(fname),
+ fname_len(fname), &fname->hinfo);
+ if (err < 0) {
+ brelse(bh2);
+ brelse(bh);
+ return err;
+ }
+ }
memset(frames, 0, sizeof(frames));
frame = frames;
frame->entries = entries;
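/*
 * A minimal userspace sketch (not kernel code, hypothetical names) of the
 * namei.c conversion above: ext4fs_dirhash() can now fail, so callers
 * either propagate the error or, on debug-only paths, cast the result to
 * (void) to state that ignoring it is deliberate.
 */
#include <stdio.h>
#include <errno.h>

static int dirhash(const char *name, unsigned int *hash)
{
	if (!name || !*name)
		return -EINVAL;		/* e.g. a casefolding failure */
	*hash = (unsigned int)name[0] * 31u;
	return 0;
}

static int lookup(const char *name)
{
	unsigned int h;
	int err = dirhash(name, &h);

	if (err < 0)
		return err;		/* propagate, never use a bogus hash */
	printf("hash(%s)=%u\n", name, h);
	return 0;
}

int main(void)
{
	unsigned int h;

	(void)dirhash("ab", &h);	/* debug-only caller ignores errors */
	return lookup("ab");		/* regular caller propagates them */
}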
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index d39f386e9baf..495f99c10085 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1048,6 +1048,8 @@ void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
int ret;
+ if (!grp || !gdp)
+ return;
if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
&grp->bb_state);
@@ -6387,6 +6389,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
struct ext4_mount_options old_opts;
ext4_group_t g;
int err = 0;
+ int enable_rw = 0;
#ifdef CONFIG_QUOTA
int enable_quota = 0;
int i, j;
@@ -6573,7 +6576,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
if (err)
goto restore_opts;
- sb->s_flags &= ~SB_RDONLY;
+ enable_rw = 1;
if (ext4_has_feature_mmp(sb)) {
err = ext4_multi_mount_protect(sb,
le64_to_cpu(es->s_mmp_block));
@@ -6616,9 +6619,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
}
#ifdef CONFIG_QUOTA
- /* Release old quota file names */
- for (i = 0; i < EXT4_MAXQUOTAS; i++)
- kfree(old_opts.s_qf_names[i]);
if (enable_quota) {
if (sb_any_quota_suspended(sb))
dquot_resume(sb, -1);
@@ -6628,16 +6628,29 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
goto restore_opts;
}
}
+ /* Release old quota file names */
+ for (i = 0; i < EXT4_MAXQUOTAS; i++)
+ kfree(old_opts.s_qf_names[i]);
#endif
if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
ext4_release_system_zone(sb);
+ if (enable_rw)
+ sb->s_flags &= ~SB_RDONLY;
+
if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
ext4_stop_mmpd(sbi);
return 0;
restore_opts:
+ /*
+ * If there was a failing r/w to ro transition, we may need to
+ * re-enable quota
+ */
+ if ((sb->s_flags & SB_RDONLY) && !(old_sb_flags & SB_RDONLY) &&
+ sb_any_quota_suspended(sb))
+ dquot_resume(sb, -1);
sb->s_flags = old_sb_flags;
sbi->s_mount_opt = old_opts.s_mount_opt;
sbi->s_mount_opt2 = old_opts.s_mount_opt2;
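/*
 * A minimal userspace sketch (not kernel code, hypothetical names) of the
 * remount reordering above: destructive steps (clearing SB_RDONLY,
 * freeing the old quota names) are deferred until every fallible step has
 * succeeded, so the restore path can still fall back to the old state.
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct state { int rdonly; char *qname; };

static int risky_step(void) { return 0; }	/* may fail with -errno */

static int remount_rw(struct state *s, const char *new_qname)
{
	char *old = s->qname;
	char *dup = strdup(new_qname);
	int err;

	if (!dup)
		return -ENOMEM;
	s->qname = dup;

	err = risky_step();
	if (err) {		/* restore: the old state is still intact */
		free(dup);
		s->qname = old;
		return err;
	}
	free(old);		/* commit only after everything succeeded */
	s->rdonly = 0;
	return 0;
}

int main(void)
{
	struct state s = { 1, strdup("aquota.user") };

	return remount_rw(&s, "aquota.group");
}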
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index dadad29bd81b..dfc2e223bd10 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -2614,6 +2614,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
.in_inode = !!entry->e_value_inum,
};
struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
+ int needs_kvfree = 0;
int error;
is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
@@ -2636,7 +2637,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
error = -ENOMEM;
goto out;
}
-
+ needs_kvfree = 1;
error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
if (error)
goto out;
@@ -2675,7 +2676,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
out:
kfree(b_entry_name);
- if (entry->e_value_inum && buffer)
+ if (needs_kvfree && buffer)
kvfree(buffer);
if (is)
brelse(is->iloc.bh);
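/*
 * A minimal userspace sketch (not kernel code, hypothetical names) of the
 * xattr.c fix above: instead of re-deriving "should I free this?" from
 * state that may have changed (entry->e_value_inum), ownership is recorded
 * in an explicit flag at the allocation site.
 */
#include <stdlib.h>

static int process(int want_buffer)
{
	char *buf = NULL;
	int needs_free = 0;

	if (want_buffer) {
		buf = malloc(4096);
		if (!buf)
			return -1;
		needs_free = 1;	/* record ownership where it is taken */
	}
	/* ... work that may change whatever 'want_buffer' mirrored ... */

	if (needs_free)		/* never re-derive ownership from other state */
		free(buf);
	return 0;
}

int main(void) { return process(1); }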
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 64b3860f50ee..8fd3b7f9fb88 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -30,12 +30,9 @@ void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io,
unsigned char reason)
{
f2fs_build_fault_attr(sbi, 0, 0);
- set_ckpt_flags(sbi, CP_ERROR_FLAG);
- if (!end_io) {
+ if (!end_io)
f2fs_flush_merged_writes(sbi);
-
- f2fs_handle_stop(sbi, reason);
- }
+ f2fs_handle_critical_error(sbi, reason, end_io);
}
/*
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 7165b1202f53..7dd92a9028b1 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2173,7 +2173,6 @@ submit_and_realloc:
f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO,
F2FS_BLKSIZE);
*last_block_in_bio = block_nr;
- goto out;
out:
*bio_ret = bio;
return ret;
@@ -2807,6 +2806,10 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
if (S_ISDIR(inode->i_mode) &&
!is_sbi_flag_set(sbi, SBI_IS_CLOSE))
goto redirty_out;
+
+ /* keep data pages in remount-ro mode */
+ if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
+ goto redirty_out;
goto out;
}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index d211ee89c158..7afc9aef127a 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -162,6 +162,7 @@ struct f2fs_mount_info {
int fs_mode; /* fs mode: LFS or ADAPTIVE */
int bggc_mode; /* bggc mode: off, on or sync */
int memory_mode; /* memory mode */
+ int errors; /* errors parameter */
int discard_unit; /*
* discard command's offset/size should
* be aligned to this unit: block,
@@ -1370,6 +1371,12 @@ enum {
MEMORY_MODE_LOW, /* memory mode for low memory devices */
};
+enum errors_option {
+ MOUNT_ERRORS_READONLY, /* remount fs ro on errors */
+ MOUNT_ERRORS_CONTINUE, /* continue on errors */
+ MOUNT_ERRORS_PANIC, /* panic on errors */
+};
+
static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);
@@ -1721,8 +1728,14 @@ struct f2fs_sb_info {
struct workqueue_struct *post_read_wq; /* post read workqueue */
- unsigned char errors[MAX_F2FS_ERRORS]; /* error flags */
- spinlock_t error_lock; /* protect errors array */
+ /*
+ * If we are in irq context, let's update error information into
+ * on-disk superblock in the work.
+ */
+ struct work_struct s_error_work;
+ unsigned char errors[MAX_F2FS_ERRORS]; /* error flags */
+ unsigned char stop_reason[MAX_STOP_REASON]; /* stop reason */
+ spinlock_t error_lock; /* protect errors/stop_reason array */
bool error_dirty; /* errors of sb is dirty */
struct kmem_cache *inline_xattr_slab; /* inline xattr entry */
@@ -3541,8 +3554,9 @@ int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
int f2fs_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
-void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason);
void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
+void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
+ bool irq_context);
void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 5ac53d2627d2..78aa8cff4b41 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -2225,7 +2225,6 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
ret = 0;
f2fs_stop_checkpoint(sbi, false,
STOP_CP_REASON_SHUTDOWN);
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
trace_f2fs_shutdown(sbi, in, ret);
}
return ret;
@@ -2238,7 +2237,6 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
if (ret)
goto out;
f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
thaw_bdev(sb->s_bdev);
break;
case F2FS_GOING_DOWN_METASYNC:
@@ -2247,16 +2245,13 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
if (ret)
goto out;
f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_NOSYNC:
f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_METAFLUSH:
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
- set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_NEED_FSCK:
set_sbi_flag(sbi, SBI_NEED_FSCK);
@@ -2593,6 +2588,11 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
inode_lock(inode);
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ err = -EINVAL;
+ goto unlock_out;
+ }
+
/* if in-place-update policy is enabled, don't waste time here */
set_inode_flag(inode, FI_OPU_WRITE);
if (f2fs_should_update_inplace(inode, NULL)) {
@@ -2717,6 +2717,7 @@ clear_out:
clear_inode_flag(inode, FI_SKIP_WRITES);
out:
clear_inode_flag(inode, FI_OPU_WRITE);
+unlock_out:
inode_unlock(inode);
if (!err)
range->len = (u64)total << PAGE_SHIFT;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 61c5f9d26018..d455140322a8 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -59,7 +59,7 @@ static int gc_thread_func(void *data)
if (gc_th->gc_wake)
gc_th->gc_wake = false;
- if (try_to_freeze()) {
+ if (try_to_freeze() || f2fs_readonly(sbi->sb)) {
stat_other_skip_bggc_count(sbi);
continue;
}
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index bd1dad523796..4a105a0cd794 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1596,6 +1596,9 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
trace_f2fs_writepage(page, NODE);
if (unlikely(f2fs_cp_error(sbi))) {
+ /* keep node pages in remount-ro mode */
+ if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
+ goto redirty_out;
ClearPageUptodate(page);
dec_page_count(sbi, F2FS_DIRTY_NODES);
unlock_page(page);
@@ -2063,7 +2066,6 @@ int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
struct list_head *head = &sbi->fsync_node_list;
unsigned long flags;
unsigned int cur_seq_id = 0;
- int ret2, ret = 0;
while (seq_id && cur_seq_id < seq_id) {
spin_lock_irqsave(&sbi->fsync_node_lock, flags);
@@ -2084,16 +2086,9 @@ int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
f2fs_wait_on_page_writeback(page, NODE, true, false);
put_page(page);
-
- if (ret)
- break;
}
- ret2 = filemap_check_errors(NODE_MAPPING(sbi));
- if (!ret)
- ret = ret2;
-
- return ret;
+ return filemap_check_errors(NODE_MAPPING(sbi));
}
static int f2fs_write_node_pages(struct address_space *mapping,
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 9f15b03037db..51812f459581 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -164,6 +164,7 @@ enum {
Opt_discard_unit,
Opt_memory_mode,
Opt_age_extent_cache,
+ Opt_errors,
Opt_err,
};
@@ -243,6 +244,7 @@ static match_table_t f2fs_tokens = {
{Opt_discard_unit, "discard_unit=%s"},
{Opt_memory_mode, "memory=%s"},
{Opt_age_extent_cache, "age_extent_cache"},
+ {Opt_errors, "errors=%s"},
{Opt_err, NULL},
};
@@ -1268,6 +1270,25 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
case Opt_age_extent_cache:
set_opt(sbi, AGE_EXTENT_CACHE);
break;
+ case Opt_errors:
+ name = match_strdup(&args[0]);
+ if (!name)
+ return -ENOMEM;
+ if (!strcmp(name, "remount-ro")) {
+ F2FS_OPTION(sbi).errors =
+ MOUNT_ERRORS_READONLY;
+ } else if (!strcmp(name, "continue")) {
+ F2FS_OPTION(sbi).errors =
+ MOUNT_ERRORS_CONTINUE;
+ } else if (!strcmp(name, "panic")) {
+ F2FS_OPTION(sbi).errors =
+ MOUNT_ERRORS_PANIC;
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
default:
f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
p);
@@ -1622,6 +1643,9 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_destroy_node_manager(sbi);
f2fs_destroy_segment_manager(sbi);
+ /* flush s_error_work before sbi destroy */
+ flush_work(&sbi->s_error_work);
+
f2fs_destroy_post_read_wq(sbi);
kvfree(sbi->ckpt);
@@ -2052,6 +2076,13 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW)
seq_printf(seq, ",memory=%s", "low");
+ if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
+ seq_printf(seq, ",errors=%s", "remount-ro");
+ else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE)
+ seq_printf(seq, ",errors=%s", "continue");
+ else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC)
+ seq_printf(seq, ",errors=%s", "panic");
+
return 0;
}
@@ -2080,6 +2111,7 @@ static void default_options(struct f2fs_sb_info *sbi)
}
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
+ F2FS_OPTION(sbi).errors = MOUNT_ERRORS_CONTINUE;
sbi->sb->s_flags &= ~SB_INLINECRYPT;
@@ -2281,6 +2313,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
if (err)
goto restore_opts;
+ /* flush outstanding errors before changing fs state */
+ flush_work(&sbi->s_error_work);
+
/*
* Previous and new state of filesystem is RO,
* so skip checking GC and FLUSH_MERGE conditions.
@@ -3926,45 +3961,60 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
return err;
}
-void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason)
+static void save_stop_reason(struct f2fs_sb_info *sbi, unsigned char reason)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->error_lock, flags);
+ if (sbi->stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0))
+ sbi->stop_reason[reason]++;
+ spin_unlock_irqrestore(&sbi->error_lock, flags);
+}
+
+static void f2fs_record_stop_reason(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+ unsigned long flags;
int err;
f2fs_down_write(&sbi->sb_lock);
- if (raw_super->s_stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0))
- raw_super->s_stop_reason[reason]++;
+ spin_lock_irqsave(&sbi->error_lock, flags);
+ memcpy(raw_super->s_stop_reason, sbi->stop_reason, MAX_STOP_REASON);
+ spin_unlock_irqrestore(&sbi->error_lock, flags);
err = f2fs_commit_super(sbi, false);
- if (err)
- f2fs_err(sbi, "f2fs_commit_super fails to record reason:%u err:%d",
- reason, err);
+
f2fs_up_write(&sbi->sb_lock);
+ if (err)
+ f2fs_err(sbi, "f2fs_commit_super fails to record err:%d", err);
}
void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
{
- spin_lock(&sbi->error_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->error_lock, flags);
if (!test_bit(flag, (unsigned long *)sbi->errors)) {
set_bit(flag, (unsigned long *)sbi->errors);
sbi->error_dirty = true;
}
- spin_unlock(&sbi->error_lock);
+ spin_unlock_irqrestore(&sbi->error_lock, flags);
}
static bool f2fs_update_errors(struct f2fs_sb_info *sbi)
{
+ unsigned long flags;
bool need_update = false;
- spin_lock(&sbi->error_lock);
+ spin_lock_irqsave(&sbi->error_lock, flags);
if (sbi->error_dirty) {
memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
MAX_F2FS_ERRORS);
sbi->error_dirty = false;
need_update = true;
}
- spin_unlock(&sbi->error_lock);
+ spin_unlock_irqrestore(&sbi->error_lock, flags);
return need_update;
}
@@ -3988,6 +4038,66 @@ out_unlock:
f2fs_up_write(&sbi->sb_lock);
}
+static bool system_going_down(void)
+{
+ return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
+ || system_state == SYSTEM_RESTART;
+}
+
+void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
+ bool irq_context)
+{
+ struct super_block *sb = sbi->sb;
+ bool shutdown = reason == STOP_CP_REASON_SHUTDOWN;
+ bool continue_fs = !shutdown &&
+ F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE;
+
+ set_ckpt_flags(sbi, CP_ERROR_FLAG);
+
+ if (!f2fs_hw_is_readonly(sbi)) {
+ save_stop_reason(sbi, reason);
+
+ if (irq_context && !shutdown)
+ schedule_work(&sbi->s_error_work);
+ else
+ f2fs_record_stop_reason(sbi);
+ }
+
+ /*
+ * We force ERRORS_RO behavior when system is rebooting. Otherwise we
+ * could panic during 'reboot -f' as the underlying device got already
+ * disabled.
+ */
+ if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC &&
+ !shutdown && !system_going_down() &&
+ !is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN))
+ panic("F2FS-fs (device %s): panic forced after error\n",
+ sb->s_id);
+
+ if (shutdown)
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
+
+ /* continue filesystem operations if errors=continue */
+ if (continue_fs || f2fs_readonly(sb))
+ return;
+
+ f2fs_warn(sbi, "Remounting filesystem read-only");
+ /*
+ * Make sure updated value of ->s_mount_flags will be visible before
+ * ->s_flags update
+ */
+ smp_wmb();
+ sb->s_flags |= SB_RDONLY;
+}
+
+static void f2fs_record_error_work(struct work_struct *work)
+{
+ struct f2fs_sb_info *sbi = container_of(work,
+ struct f2fs_sb_info, s_error_work);
+
+ f2fs_record_stop_reason(sbi);
+}
+
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
@@ -4218,7 +4328,9 @@ try_onemore:
sb->s_fs_info = sbi;
sbi->raw_super = raw_super;
+ INIT_WORK(&sbi->s_error_work, f2fs_record_error_work);
memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS);
+ memcpy(sbi->stop_reason, raw_super->s_stop_reason, MAX_STOP_REASON);
/* precompute checksum seed for metadata */
if (f2fs_sb_has_inode_chksum(sbi))
@@ -4615,6 +4727,8 @@ free_sm:
f2fs_destroy_segment_manager(sbi);
stop_ckpt_thread:
f2fs_stop_ckpt_thread(sbi);
+ /* flush s_error_work before sbi destroy */
+ flush_work(&sbi->s_error_work);
f2fs_destroy_post_read_wq(sbi);
free_devices:
destroy_device_list(sbi);
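/*
 * A minimal userspace sketch (not kernel code) of the option handling the
 * f2fs super.c hunks above add: errors={remount-ro,continue,panic} is
 * parsed into an enum, defaults to "continue", and is echoed back by
 * show_options-style code from the same name table.
 */
#include <stdio.h>
#include <string.h>

enum errors_option { ERRORS_RO, ERRORS_CONTINUE, ERRORS_PANIC };

static const char *names[] = { "remount-ro", "continue", "panic" };

static int parse_errors(const char *arg, enum errors_option *out)
{
	for (unsigned int i = 0; i < 3; i++) {
		if (!strcmp(arg, names[i])) {
			*out = (enum errors_option)i;
			return 0;
		}
	}
	return -1;			/* unknown value: reject the mount */
}

int main(void)
{
	enum errors_option o = ERRORS_CONTINUE;	/* default, as in the hunk */

	if (parse_errors("remount-ro", &o))
		return 1;
	printf("errors=%s\n", names[o]);	/* show_options analog */
	return 0;
}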
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 8ea05340bad9..467d743c801f 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -1386,12 +1386,19 @@ int __init f2fs_init_sysfs(void)
ret = kobject_init_and_add(&f2fs_feat, &f2fs_feat_ktype,
NULL, "features");
- if (ret) {
- kobject_put(&f2fs_feat);
- kset_unregister(&f2fs_kset);
- } else {
- f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
+ if (ret)
+ goto put_kobject;
+
+ f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
+ if (!f2fs_proc_root) {
+ ret = -ENOMEM;
+ goto put_kobject;
}
+
+ return 0;
+put_kobject:
+ kobject_put(&f2fs_feat);
+ kset_unregister(&f2fs_kset);
return ret;
}
@@ -1430,23 +1437,24 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
if (err)
goto put_feature_list_kobj;
- if (f2fs_proc_root)
- sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
+ sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
+ if (!sbi->s_proc) {
+ err = -ENOMEM;
+ goto put_feature_list_kobj;
+ }
- if (sbi->s_proc) {
- proc_create_single_data("segment_info", 0444, sbi->s_proc,
+ proc_create_single_data("segment_info", 0444, sbi->s_proc,
segment_info_seq_show, sb);
- proc_create_single_data("segment_bits", 0444, sbi->s_proc,
+ proc_create_single_data("segment_bits", 0444, sbi->s_proc,
segment_bits_seq_show, sb);
#ifdef CONFIG_F2FS_IOSTAT
- proc_create_single_data("iostat_info", 0444, sbi->s_proc,
+ proc_create_single_data("iostat_info", 0444, sbi->s_proc,
iostat_info_seq_show, sb);
#endif
- proc_create_single_data("victim_bits", 0444, sbi->s_proc,
+ proc_create_single_data("victim_bits", 0444, sbi->s_proc,
victim_bits_seq_show, sb);
- proc_create_single_data("discard_plist_info", 0444, sbi->s_proc,
+ proc_create_single_data("discard_plist_info", 0444, sbi->s_proc,
discard_plist_seq_show, sb);
- }
return 0;
put_feature_list_kobj:
kobject_put(&sbi->s_feature_list_kobj);
@@ -1462,8 +1470,7 @@ put_sb_kobj:
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
{
- if (sbi->s_proc)
- remove_proc_subtree(sbi->sb->s_id, f2fs_proc_root);
+ remove_proc_subtree(sbi->sb->s_id, f2fs_proc_root);
kobject_put(&sbi->s_stat_kobj);
wait_for_completion(&sbi->s_stat_kobj_unregister);
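/*
 * A minimal userspace sketch (not kernel code) of the error ladder the
 * sysfs.c hunk above converges on: every failure jumps to a label that
 * unwinds exactly what was initialized before it, and there is a single
 * success return.
 */
#include <stdlib.h>

static int init_all(void)
{
	char *a, *b;

	a = malloc(16);
	if (!a)
		goto fail;
	b = malloc(16);
	if (!b)
		goto free_a;

	(void)b;
	return 0;		/* success: a and b stay registered */

free_a:
	free(a);
fail:
	return -1;		/* one unwind ladder for all error paths */
}

int main(void) { return init_all(); }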
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ae4e51e91ee3..aca4b4811394 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -2024,7 +2024,6 @@ static long wb_writeback(struct bdi_writeback *wb,
struct blk_plug plug;
blk_start_plug(&plug);
- spin_lock(&wb->list_lock);
for (;;) {
/*
* Stop writeback when nr_pages has been consumed
@@ -2049,6 +2048,9 @@ static long wb_writeback(struct bdi_writeback *wb,
if (work->for_background && !wb_over_bg_thresh(wb))
break;
+
+ spin_lock(&wb->list_lock);
+
/*
* Kupdate and background works are special and we want to
* include all inodes that need writing. Livelock avoidance is
@@ -2078,13 +2080,19 @@ static long wb_writeback(struct bdi_writeback *wb,
* mean the overall work is done. So we keep looping as long
* as made some progress on cleaning pages or inodes.
*/
- if (progress)
+ if (progress) {
+ spin_unlock(&wb->list_lock);
continue;
+ }
+
/*
* No more inodes for IO, bail
*/
- if (list_empty(&wb->b_more_io))
+ if (list_empty(&wb->b_more_io)) {
+ spin_unlock(&wb->list_lock);
break;
+ }
+
/*
* Nothing written. Wait for some inode to
* become available for writeback. Otherwise
@@ -2096,9 +2104,7 @@ static long wb_writeback(struct bdi_writeback *wb,
spin_unlock(&wb->list_lock);
/* This function drops i_lock... */
inode_sleep_on_writeback(inode);
- spin_lock(&wb->list_lock);
}
- spin_unlock(&wb->list_lock);
blk_finish_plug(&plug);
return nr_pages - work->nr_pages;
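/*
 * A minimal userspace sketch (not kernel code) of the fs-writeback change
 * above: rather than holding wb->list_lock across the whole retry loop,
 * it is taken per iteration, and every continue/break path drops it. The
 * pthread mutex stands in for the spinlock.
 */
#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_pass(void) { static int n = 3; return n-- > 0; }

static void writeback(void)
{
	for (;;) {
		/* cheap exit checks can run unlocked here ... */

		pthread_mutex_lock(&list_lock);
		int progress = do_pass();	/* list walk under the lock */

		if (progress) {
			pthread_mutex_unlock(&list_lock);
			continue;		/* every early path unlocks */
		}
		pthread_mutex_unlock(&list_lock);
		break;
	}
}

int main(void) { writeback(); return 0; }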
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 1a8f82f478cb..b891468dee06 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -700,7 +700,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
struct pipe_buffer *buf = cs->pipebufs;
if (!cs->write) {
- err = pipe_buf_confirm(cs->pipe, buf);
+ err = pipe_buf_confirm(cs->pipe, buf, false);
if (err)
return err;
@@ -800,7 +800,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
fuse_copy_finish(cs);
- err = pipe_buf_confirm(cs->pipe, buf);
+ err = pipe_buf_confirm(cs->pipe, buf, false);
if (err)
goto out_put_old;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 35bc174f9ba2..5a4a7155cf1c 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -395,8 +395,6 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
goto out_put_forget;
err = -EIO;
- if (!outarg->nodeid)
- goto out_put_forget;
if (fuse_invalid_attr(&outarg->attr))
goto out_put_forget;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index d66070af145d..660be31aaabc 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1254,7 +1254,8 @@ void fuse_send_init(struct fuse_mount *fm)
FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
- FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP;
+ FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
+ FUSE_HAS_EXPIRE_ONLY;
#ifdef CONFIG_FUSE_DAX
if (fm->fc->dax)
flags |= FUSE_MAP_ALIGNMENT;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index ecfdfb2529a3..90361a922cec 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -834,9 +834,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
break;
}
- /* Set numa allocation policy based on index */
- hugetlb_set_vma_policy(&pseudo_vma, inode, index);
-
/* addr is the offset within the file (zero based) */
addr = index * hpage_size;
@@ -850,7 +847,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
rcu_read_unlock();
if (present) {
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- hugetlb_drop_vma_policy(&pseudo_vma);
continue;
}
@@ -862,6 +858,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
* folios in these areas, we need to consume the reserves
* to keep reservation accounting consistent.
*/
+ hugetlb_set_vma_policy(&pseudo_vma, inode, index);
folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0);
hugetlb_drop_vma_policy(&pseudo_vma);
if (IS_ERR(folio)) {
diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
index 4ed379f9b1aa..4882a812ea86 100644
--- a/fs/ksmbd/connection.c
+++ b/fs/ksmbd/connection.c
@@ -351,7 +351,8 @@ int ksmbd_conn_handler_loop(void *p)
break;
/* 4 for rfc1002 length field */
- size = pdu_size + 4;
+ /* 1 for implied bcc[0] */
+ size = pdu_size + 4 + 1;
conn->request_buf = kvmalloc(size, GFP_KERNEL);
if (!conn->request_buf)
break;
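/*
 * A minimal userspace sketch (not kernel code, hypothetical names) of the
 * ksmbd allocation fix above: later parsing assumes one byte past the
 * wire payload (the implied bcc[0]), so the buffer is over-allocated by
 * one byte up front, turning a 1-byte overread into defined behavior.
 */
#include <stdlib.h>
#include <string.h>

static char *read_pdu(const char *wire, size_t pdu_size)
{
	/* 4 for the length field, +1 for the byte the parser implies */
	char *buf = malloc(pdu_size + 4 + 1);

	if (!buf)
		return NULL;
	memcpy(buf, wire, pdu_size + 4);
	buf[pdu_size + 4] = 0;	/* the implied trailing byte */
	return buf;
}

int main(void)
{
	char wire[8] = { 0 };
	char *p = read_pdu(wire, 4);

	free(p);
	return !p;
}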
diff --git a/fs/ksmbd/mgmt/tree_connect.c b/fs/ksmbd/mgmt/tree_connect.c
index f07a05f37651..408cddf2f094 100644
--- a/fs/ksmbd/mgmt/tree_connect.c
+++ b/fs/ksmbd/mgmt/tree_connect.c
@@ -120,17 +120,6 @@ struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
return tcon;
}
-struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
- unsigned int id)
-{
- struct ksmbd_tree_connect *tc;
-
- tc = ksmbd_tree_conn_lookup(sess, id);
- if (tc)
- return tc->share_conf;
- return NULL;
-}
-
int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess)
{
int ret = 0;
diff --git a/fs/ksmbd/mgmt/tree_connect.h b/fs/ksmbd/mgmt/tree_connect.h
index 700df36cf3e3..562d647ad9fa 100644
--- a/fs/ksmbd/mgmt/tree_connect.h
+++ b/fs/ksmbd/mgmt/tree_connect.h
@@ -53,9 +53,6 @@ int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
unsigned int id);
-struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
- unsigned int id);
-
int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess);
#endif /* __TREE_CONNECT_MANAGEMENT_H__ */
diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c
index 2e54ded4d92c..5e09834016bb 100644
--- a/fs/ksmbd/oplock.c
+++ b/fs/ksmbd/oplock.c
@@ -1492,7 +1492,7 @@ struct create_context *smb2_find_context_vals(void *open_req, const char *tag)
return ERR_PTR(-EINVAL);
name = (char *)cc + name_off;
- if (memcmp(name, tag, name_len) == 0)
+ if (!strcmp(name, tag))
return cc;
remain_len -= next;
diff --git a/fs/ksmbd/smb2misc.c b/fs/ksmbd/smb2misc.c
index fbdde426dd01..0ffe663b7590 100644
--- a/fs/ksmbd/smb2misc.c
+++ b/fs/ksmbd/smb2misc.c
@@ -416,8 +416,11 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
/*
* Allow a message that padded to 8byte boundary.
+ * Linux 4.19.217 with smb 3.0.2 is sometimes
+ * sending messages where the clc_len is exactly
+ * 8 bytes less than len.
*/
- if (clc_len < len && (len - clc_len) < 8)
+ if (clc_len < len && (len - clc_len) <= 8)
goto validate_credit;
pr_err_ratelimited(
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index cb93fd231f4e..8de8afd473ae 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -1356,7 +1356,7 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
struct authenticate_message *authblob;
struct ksmbd_user *user;
char *name;
- unsigned int auth_msg_len, name_off, name_len, secbuf_len;
+ unsigned int name_off, name_len, secbuf_len;
secbuf_len = le16_to_cpu(req->SecurityBufferLength);
if (secbuf_len < sizeof(struct authenticate_message)) {
@@ -1366,9 +1366,8 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
authblob = user_authblob(conn, req);
name_off = le32_to_cpu(authblob->UserName.BufferOffset);
name_len = le16_to_cpu(authblob->UserName.Length);
- auth_msg_len = le16_to_cpu(req->SecurityBufferOffset) + secbuf_len;
- if (auth_msg_len < (u64)name_off + name_len)
+ if (secbuf_len < (u64)name_off + name_len)
return NULL;
name = smb_strndup_from_utf16((const char *)authblob + name_off,
diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
index af0c2a9b8529..c6e4d38319df 100644
--- a/fs/ksmbd/smb_common.c
+++ b/fs/ksmbd/smb_common.c
@@ -347,8 +347,8 @@ static int smb1_check_user_session(struct ksmbd_work *work)
*/
static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
{
- work->response_buf = kmalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
- GFP_KERNEL | __GFP_ZERO);
+ work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
+ GFP_KERNEL);
work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
if (!work->response_buf) {
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index bb94949bc223..04ba95b83d16 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -77,9 +77,9 @@ static const unsigned long nlm_grace_period_min = 0;
static const unsigned long nlm_grace_period_max = 240;
static const unsigned long nlm_timeout_min = 3;
static const unsigned long nlm_timeout_max = 20;
-static const int nlm_port_min = 0, nlm_port_max = 65535;
#ifdef CONFIG_SYSCTL
+static const int nlm_port_min = 0, nlm_port_max = 65535;
static struct ctl_table_header * nlm_sysctl_table;
#endif
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 7b8f17ee5224..c159817d1282 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -153,18 +153,6 @@ static int exports_net_open(struct net *net, struct file *file)
return 0;
}
-static int exports_proc_open(struct inode *inode, struct file *file)
-{
- return exports_net_open(current->nsproxy->net_ns, file);
-}
-
-static const struct proc_ops exports_proc_ops = {
- .proc_open = exports_proc_open,
- .proc_read = seq_read,
- .proc_lseek = seq_lseek,
- .proc_release = seq_release,
-};
-
static int exports_nfsd_open(struct inode *inode, struct file *file)
{
return exports_net_open(inode->i_sb->s_fs_info, file);
@@ -1458,6 +1446,19 @@ static struct file_system_type nfsd_fs_type = {
MODULE_ALIAS_FS("nfsd");
#ifdef CONFIG_PROC_FS
+
+static int exports_proc_open(struct inode *inode, struct file *file)
+{
+ return exports_net_open(current->nsproxy->net_ns, file);
+}
+
+static const struct proc_ops exports_proc_ops = {
+ .proc_open = exports_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
+};
+
static int create_proc_exports_entry(void)
{
struct proc_dir_entry *entry;
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 49cfe2ae6d23..993375f0db67 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -65,7 +65,7 @@ int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask,
struct fsnotify_event *fsn_event;
struct fsnotify_group *group = inode_mark->group;
int ret;
- int len = 0;
+ int len = 0, wd;
int alloc_len = sizeof(struct inotify_event_info);
struct mem_cgroup *old_memcg;
@@ -81,6 +81,13 @@ int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask,
fsn_mark);
/*
+ * We can be racing with mark being detached. Don't report event with
+ * invalid wd.
+ */
+ wd = READ_ONCE(i_mark->wd);
+ if (wd == -1)
+ return 0;
+ /*
* Whoever is interested in the event, pays for the allocation. Do not
* trigger OOM killer in the target monitoring memcg as it may have
* security repercussion.
@@ -110,7 +117,7 @@ int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask,
fsn_event = &event->fse;
fsnotify_init_event(fsn_event);
event->mask = mask;
- event->wd = i_mark->wd;
+ event->wd = wd;
event->sync_cookie = cookie;
event->name_len = len;
if (len)
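/*
 * A minimal userspace sketch (not kernel code) of the inotify race fix
 * above: the watch descriptor is read once into a local, validated, and
 * only that snapshot is used afterwards, so a concurrent detach (which
 * sets wd to -1) cannot leak a stale value into the event. C11 atomics
 * stand in for READ_ONCE().
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int mark_wd = 42;

static int handle_event(void)
{
	int wd = atomic_load(&mark_wd);	/* single snapshot, like READ_ONCE */

	if (wd == -1)
		return 0;	/* mark already detached: drop the event */
	printf("event for wd %d\n", wd);	/* never re-reads mark_wd */
	return 1;
}

int main(void) { return !handle_event(); }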
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 48030899dc6e..34102fe63c0e 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -1955,36 +1955,40 @@ undo_alloc:
"attribute.%s", es);
NVolSetErrors(vol);
}
- a = ctx->attr;
+
if (ntfs_rl_truncate_nolock(vol, &mft_ni->runlist, old_last_vcn)) {
ntfs_error(vol->sb, "Failed to truncate mft data attribute "
"runlist.%s", es);
NVolSetErrors(vol);
}
- if (mp_rebuilt && !IS_ERR(ctx->mrec)) {
- if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
+ if (ctx) {
+ a = ctx->attr;
+ if (mp_rebuilt && !IS_ERR(ctx->mrec)) {
+ if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
a->data.non_resident.mapping_pairs_offset),
old_alen - le16_to_cpu(
- a->data.non_resident.mapping_pairs_offset),
+ a->data.non_resident.mapping_pairs_offset),
rl2, ll, -1, NULL)) {
- ntfs_error(vol->sb, "Failed to restore mapping pairs "
+ ntfs_error(vol->sb, "Failed to restore mapping pairs "
"array.%s", es);
- NVolSetErrors(vol);
- }
- if (ntfs_attr_record_resize(ctx->mrec, a, old_alen)) {
- ntfs_error(vol->sb, "Failed to restore attribute "
+ NVolSetErrors(vol);
+ }
+ if (ntfs_attr_record_resize(ctx->mrec, a, old_alen)) {
+ ntfs_error(vol->sb, "Failed to restore attribute "
"record.%s", es);
- NVolSetErrors(vol);
+ NVolSetErrors(vol);
+ }
+ flush_dcache_mft_record_page(ctx->ntfs_ino);
+ mark_mft_record_dirty(ctx->ntfs_ino);
}
- flush_dcache_mft_record_page(ctx->ntfs_ino);
- mark_mft_record_dirty(ctx->ntfs_ino);
- } else if (IS_ERR(ctx->mrec)) {
- ntfs_error(vol->sb, "Failed to restore attribute search "
+ else if (IS_ERR(ctx->mrec)) {
+ ntfs_error(vol->sb, "Failed to restore attribute search "
"context.%s", es);
- NVolSetErrors(vol);
+ NVolSetErrors(vol);
+ }
+ if (ctx)
+ ntfs_attr_put_search_ctx(ctx);
}
- if (ctx)
- ntfs_attr_put_search_ctx(ctx);
if (!IS_ERR(mrec))
unmap_mft_record(mft_ni);
up_write(&mft_ni->runlist.lock);
diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
index 0b8bc66377db..a9d82bbb4729 100644
--- a/fs/ntfs3/attrib.c
+++ b/fs/ntfs3/attrib.c
@@ -573,7 +573,7 @@ add_alloc_in_same_attr_seg:
sbi, run, vcn, lcn, to_allocate, &pre_alloc,
is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
is_mft ? 0 :
- (sbi->record_size -
+ (sbi->record_size -
le32_to_cpu(rec->used) + 8) /
3 +
1,
diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
index c0c6bcbc8c05..42631b31adf1 100644
--- a/fs/ntfs3/attrlist.c
+++ b/fs/ntfs3/attrlist.c
@@ -52,7 +52,7 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
if (!attr->non_res) {
lsize = le32_to_cpu(attr->res.data_size);
- le = kmalloc(al_aligned(lsize), GFP_NOFS);
+ le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
if (!le) {
err = -ENOMEM;
goto out;
@@ -80,7 +80,7 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
if (err < 0)
goto out;
- le = kmalloc(al_aligned(lsize), GFP_NOFS);
+ le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
if (!le) {
err = -ENOMEM;
goto out;
@@ -375,8 +375,7 @@ bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le)
* al_delete_le - Delete first le from the list which matches its parameters.
*/
bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
- const __le16 *name, size_t name_len,
- const struct MFT_REF *ref)
+ const __le16 *name, u8 name_len, const struct MFT_REF *ref)
{
u16 size;
struct ATTR_LIST_ENTRY *le;
diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
index 9a6c6a09d70c..107e808e06ea 100644
--- a/fs/ntfs3/bitmap.c
+++ b/fs/ntfs3/bitmap.c
@@ -287,8 +287,8 @@ static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
/* Check bits before 'bit'. */
ib = wnd->zone_bit == wnd->zone_end ||
bit < wnd->zone_end ?
- 0 :
- wnd->zone_end;
+ 0 :
+ wnd->zone_end;
while (bit > ib && wnd_is_free_hlp(wnd, bit - 1, 1)) {
bit -= 1;
@@ -298,8 +298,8 @@ static void wnd_add_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len,
/* Check bits after 'end_in'. */
ib = wnd->zone_bit == wnd->zone_end ||
end_in > wnd->zone_bit ?
- wnd->nbits :
- wnd->zone_bit;
+ wnd->nbits :
+ wnd->zone_bit;
while (end_in < ib && wnd_is_free_hlp(wnd, end_in, 1)) {
end_in += 1;
@@ -418,7 +418,7 @@ static void wnd_remove_free_ext(struct wnd_bitmap *wnd, size_t bit, size_t len)
n3 = rb_first(&wnd->count_tree);
wnd->extent_max =
n3 ? rb_entry(n3, struct e_node, count.node)->count.key :
- 0;
+ 0;
return;
}
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 9a3d55c367d9..4c653945ef08 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -179,7 +179,7 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
int err = 0;
struct address_space *mapping = inode->i_mapping;
- u32 blocksize = 1 << inode->i_blkbits;
+ u32 blocksize = i_blocksize(inode);
pgoff_t idx = vbo >> PAGE_SHIFT;
u32 from = vbo & (PAGE_SIZE - 1);
pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -192,7 +192,7 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
for (; idx < idx_end; idx += 1, from = 0) {
page_off = (loff_t)idx << PAGE_SHIFT;
to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) :
- PAGE_SIZE;
+ PAGE_SIZE;
iblock = page_off >> inode->i_blkbits;
page = find_or_create_page(mapping, idx,
@@ -1052,7 +1052,7 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
goto out;
ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) :
- __generic_file_write_iter(iocb, from);
+ __generic_file_write_iter(iocb, from);
out:
inode_unlock(inode);
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 2bfcf1a989c9..16bd9faa2d28 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -77,7 +77,7 @@ struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni)
attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO)) :
- NULL;
+ NULL;
}
/*
@@ -92,7 +92,7 @@ struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni)
attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL);
return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5)) :
- NULL;
+ NULL;
}
/*
@@ -236,6 +236,7 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
return attr;
out:
+ ntfs_inode_err(&ni->vfs_inode, "failed to parse mft record");
ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
return NULL;
}
@@ -384,7 +385,7 @@ bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi)
* ni_remove_attr - Remove all attributes for the given type/name/id.
*/
int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
- const __le16 *name, size_t name_len, bool base_only,
+ const __le16 *name, u8 name_len, bool base_only,
const __le16 *id)
{
int err;
@@ -517,6 +518,9 @@ out:
*/
static int ni_repack(struct ntfs_inode *ni)
{
+#if 1
+ return 0;
+#else
int err = 0;
struct ntfs_sb_info *sbi = ni->mi.sbi;
struct mft_inode *mi, *mi_p = NULL;
@@ -639,6 +643,7 @@ static int ni_repack(struct ntfs_inode *ni)
run_close(&run);
return err;
+#endif
}
/*
@@ -813,10 +818,8 @@ int ni_create_attr_list(struct ntfs_inode *ni)
* Looks like one record_size is always enough.
*/
le = kmalloc(al_aligned(rs), GFP_NOFS);
- if (!le) {
- err = -ENOMEM;
- goto out;
- }
+ if (!le)
+ return -ENOMEM;
mi_get_ref(&ni->mi, &le->ref);
ni->attr_list.le = le;
@@ -865,15 +868,16 @@ int ni_create_attr_list(struct ntfs_inode *ni)
if (to_free > free_b) {
err = -EINVAL;
- goto out1;
+ goto out;
}
}
/* Allocate child MFT. */
err = ntfs_look_free_mft(sbi, &rno, is_mft, ni, &mi);
if (err)
- goto out1;
+ goto out;
+ err = -EINVAL;
/* Call mi_remove_attr() in reverse order to keep pointers 'arr_move' valid. */
while (to_free > 0) {
struct ATTRIB *b = arr_move[--nb];
@@ -882,7 +886,8 @@ int ni_create_attr_list(struct ntfs_inode *ni)
attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off),
b->name_len, asize, name_off);
- WARN_ON(!attr);
+ if (!attr)
+ goto out;
mi_get_ref(mi, &le_b[nb]->ref);
le_b[nb]->id = attr->id;
@@ -892,17 +897,20 @@ int ni_create_attr_list(struct ntfs_inode *ni)
attr->id = le_b[nb]->id;
/* Remove from primary record. */
- WARN_ON(!mi_remove_attr(NULL, &ni->mi, b));
+ if (!mi_remove_attr(NULL, &ni->mi, b))
+ goto out;
if (to_free <= asize)
break;
to_free -= asize;
- WARN_ON(!nb);
+ if (!nb)
+ goto out;
}
attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0,
lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT);
- WARN_ON(!attr);
+ if (!attr)
+ goto out;
attr->non_res = 0;
attr->flags = 0;
@@ -916,14 +924,12 @@ int ni_create_attr_list(struct ntfs_inode *ni)
ni->attr_list.dirty = false;
mark_inode_dirty(&ni->vfs_inode);
- goto out;
+ return 0;
-out1:
+out:
kfree(ni->attr_list.le);
ni->attr_list.le = NULL;
ni->attr_list.size = 0;
-
-out:
return err;
}
@@ -1638,14 +1644,13 @@ int ni_delete_all(struct ntfs_inode *ni)
* Return: File name attribute by its value.
*/
struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
- const struct cpu_str *uni,
+ const struct le_str *uni,
const struct MFT_REF *home_dir,
struct mft_inode **mi,
struct ATTR_LIST_ENTRY **le)
{
struct ATTRIB *attr = NULL;
struct ATTR_FILE_NAME *fname;
- struct le_str *fns;
if (le)
*le = NULL;
@@ -1669,10 +1674,9 @@ next:
if (uni->len != fname->name_len)
goto next;
- fns = (struct le_str *)&fname->name_len;
- if (ntfs_cmp_names_cpu(uni, fns, NULL, false))
+ if (ntfs_cmp_names(uni->name, uni->len, fname->name, uni->len, NULL,
+ false))
goto next;
-
return fname;
}
@@ -1757,8 +1761,8 @@ int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa)
/* Resize nonresident empty attribute in-place only. */
new_asize = (new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) ?
- (SIZEOF_NONRESIDENT_EX + 8) :
- (SIZEOF_NONRESIDENT + 8);
+ (SIZEOF_NONRESIDENT_EX + 8) :
+ (SIZEOF_NONRESIDENT + 8);
if (!mi_resize_attr(mi, attr, new_asize - le32_to_cpu(attr->size)))
return -EOPNOTSUPP;
@@ -2910,7 +2914,7 @@ int ni_remove_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni,
/* Find name in record. */
mi_get_ref(&dir_ni->mi, &de_name->home);
- fname = ni_fname_name(ni, (struct cpu_str *)&de_name->name_len,
+ fname = ni_fname_name(ni, (struct le_str *)&de_name->name_len,
&de_name->home, &mi, &le);
if (!fname)
return -ENOENT;
@@ -3160,8 +3164,8 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
__le64 valid_le;
dup->alloc_size = is_attr_ext(attr) ?
- attr->nres.total_size :
- attr->nres.alloc_size;
+ attr->nres.total_size :
+ attr->nres.alloc_size;
dup->data_size = attr->nres.data_size;
if (new_valid > data_size)
diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
index 57762c5fe68b..12f28cdf5c83 100644
--- a/fs/ntfs3/fslog.c
+++ b/fs/ntfs3/fslog.c
@@ -828,8 +828,8 @@ static inline struct RESTART_TABLE *extend_rsttbl(struct RESTART_TABLE *tbl,
memcpy(rt + 1, tbl + 1, esize * used);
rt->free_goal = free_goal == ~0u ?
- cpu_to_le32(~0u) :
- cpu_to_le32(sizeof(struct RESTART_TABLE) +
+ cpu_to_le32(~0u) :
+ cpu_to_le32(sizeof(struct RESTART_TABLE) +
free_goal * esize);
if (tbl->first_free) {
@@ -1090,8 +1090,8 @@ static inline u64 base_lsn(struct ntfs_log *log,
<< log->file_data_bits) +
((((is_log_record_end(hdr) &&
h_lsn <= le64_to_cpu(hdr->record_hdr.last_end_lsn)) ?
- le16_to_cpu(hdr->record_hdr.next_record_off) :
- log->page_size) +
+ le16_to_cpu(hdr->record_hdr.next_record_off) :
+ log->page_size) +
lsn) >>
3);
@@ -1299,8 +1299,8 @@ static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
log->clst_per_page = 1;
log->first_page = major_ver >= 2 ?
- 0x22 * page_size :
- ((sys_page_size << 1) + (page_size << 1));
+ 0x22 * page_size :
+ ((sys_page_size << 1) + (page_size << 1));
log->major_ver = major_ver;
log->minor_ver = minor_ver;
}
@@ -1513,8 +1513,8 @@ static u32 current_log_avail(struct ntfs_log *log)
* If there is no oldest lsn then start at the first page of the file.
*/
oldest_off = (log->l_flags & NTFSLOG_NO_OLDEST_LSN) ?
- log->first_page :
- (log->oldest_lsn_off & ~log->sys_page_mask);
+ log->first_page :
+ (log->oldest_lsn_off & ~log->sys_page_mask);
/*
* We will use the next log page offset to compute the next free page.
@@ -1522,9 +1522,9 @@ static u32 current_log_avail(struct ntfs_log *log)
* If we are at the first page then use the end of the file.
*/
next_free_off = (log->l_flags & NTFSLOG_REUSE_TAIL) ?
- log->next_page + log->page_size :
+ log->next_page + log->page_size :
log->next_page == log->first_page ? log->l_size :
- log->next_page;
+ log->next_page;
/* If the two offsets are the same then there is no available space. */
if (oldest_off == next_free_off)
@@ -1535,8 +1535,8 @@ static u32 current_log_avail(struct ntfs_log *log)
*/
free_bytes =
oldest_off < next_free_off ?
- log->total_avail_pages - (next_free_off - oldest_off) :
- oldest_off - next_free_off;
+ log->total_avail_pages - (next_free_off - oldest_off) :
+ oldest_off - next_free_off;
free_bytes >>= log->page_bits;
return free_bytes * log->reserved;
@@ -1671,7 +1671,7 @@ next_tail:
best_lsn1 = first_tail ? base_lsn(log, first_tail, first_file_off) : 0;
best_lsn2 = second_tail ? base_lsn(log, second_tail, second_file_off) :
- 0;
+ 0;
if (first_tail && second_tail) {
if (best_lsn1 > best_lsn2) {
@@ -1767,7 +1767,7 @@ tail_read:
page_cnt = page_pos = 1;
curpage_off = seq_base == log->seq_num ? min(log->next_page, page_off) :
- log->next_page;
+ log->next_page;
wrapped_file =
curpage_off == log->first_page &&
@@ -1826,8 +1826,8 @@ use_cur_page:
((lsn_cur >> log->file_data_bits) +
((curpage_off <
(lsn_to_vbo(log, lsn_cur) & ~log->page_mask)) ?
- 1 :
- 0)) != expected_seq) {
+ 1 :
+ 0)) != expected_seq) {
goto check_tail;
}
@@ -2643,8 +2643,8 @@ static inline bool check_index_root(const struct ATTRIB *attr,
const struct INDEX_ROOT *root = resident_data(attr);
u8 index_bits = le32_to_cpu(root->index_block_size) >=
sbi->cluster_size ?
- sbi->cluster_bits :
- SECTOR_SHIFT;
+ sbi->cluster_bits :
+ SECTOR_SHIFT;
u8 block_clst = root->index_block_clst;
if (le32_to_cpu(attr->res.data_size) < sizeof(struct INDEX_ROOT) ||
@@ -3861,9 +3861,9 @@ check_restart_area:
/* If we have a valid page then grab a pointer to the restart area. */
ra2 = rst_info.valid_page ?
- Add2Ptr(rst_info.r_page,
+ Add2Ptr(rst_info.r_page,
le16_to_cpu(rst_info.r_page->ra_off)) :
- NULL;
+ NULL;
if (rst_info.chkdsk_was_run ||
(ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) {
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 28cc421102e5..33afee0f5559 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -9,6 +9,7 @@
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/kernel.h>
+#include <linux/nls.h>
#include "debug.h"
#include "ntfs.h"
@@ -173,12 +174,12 @@ int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
fo = le16_to_cpu(rhdr->fix_off);
fn = simple ? ((bytes >> SECTOR_SHIFT) + 1) :
- le16_to_cpu(rhdr->fix_num);
+ le16_to_cpu(rhdr->fix_num);
/* Check errors. */
if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
fn * SECTOR_SIZE > bytes) {
- return -EINVAL; /* Native chkntfs returns ok! */
+ return -E_NTFS_CORRUPT;
}
/* Get fixup pointer. */
@@ -1661,7 +1662,8 @@ int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
return 0;
}
-struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
+struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno,
+ enum RECORD_FLAG flag)
{
int err = 0;
struct super_block *sb = sbi->sb;
@@ -1673,8 +1675,7 @@ struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
ni = ntfs_i(inode);
- err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
- false);
+ err = mi_format_new(&ni->mi, sbi, rno, flag, false);
if (err)
goto out;
@@ -1937,7 +1938,7 @@ int ntfs_security_init(struct ntfs_sb_info *sbi)
break;
sii_e = (struct NTFS_DE_SII *)ne;
- if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
+ if (le16_to_cpu(ne->view.data_size) < sizeof(sii_e->sec_hdr))
continue;
next_id = le32_to_cpu(sii_e->sec_id) + 1;
@@ -1998,18 +1999,18 @@ int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
goto out;
t32 = le32_to_cpu(sii_e->sec_hdr.size);
- if (t32 < SIZEOF_SECURITY_HDR) {
+ if (t32 < sizeof(struct SECURITY_HDR)) {
err = -EINVAL;
goto out;
}
- if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
+ if (t32 > sizeof(struct SECURITY_HDR) + 0x10000) {
/* Looks like too big security. 0x10000 - is arbitrary big number. */
err = -EFBIG;
goto out;
}
- *size = t32 - SIZEOF_SECURITY_HDR;
+ *size = t32 - sizeof(struct SECURITY_HDR);
p = kmalloc(*size, GFP_NOFS);
if (!p) {
@@ -2023,14 +2024,14 @@ int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
if (err)
goto out;
- if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
+ if (memcmp(&d_security, &sii_e->sec_hdr, sizeof(d_security))) {
err = -EINVAL;
goto out;
}
err = ntfs_read_run_nb(sbi, &ni->file.run,
le64_to_cpu(sii_e->sec_hdr.off) +
- SIZEOF_SECURITY_HDR,
+ sizeof(struct SECURITY_HDR),
p, *size, NULL);
if (err)
goto out;
@@ -2069,7 +2070,7 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
struct NTFS_DE_SDH sdh_e;
struct NTFS_DE_SII sii_e;
struct SECURITY_HDR *d_security;
- u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
+ u32 new_sec_size = size_sd + sizeof(struct SECURITY_HDR);
u32 aligned_sec_size = ALIGN(new_sec_size, 16);
struct SECURITY_KEY hash_key;
struct ntfs_fnd *fnd_sdh = NULL;
@@ -2207,14 +2208,14 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
/* Fill SII entry. */
sii_e.de.view.data_off =
cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
- sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
+ sii_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
sii_e.de.view.res = 0;
- sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
+ sii_e.de.size = cpu_to_le16(sizeof(struct NTFS_DE_SII));
sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
sii_e.de.flags = 0;
sii_e.de.res = 0;
sii_e.sec_id = d_security->key.sec_id;
- memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
+ memcpy(&sii_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
if (err)
@@ -2223,7 +2224,7 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
/* Fill SDH entry. */
sdh_e.de.view.data_off =
cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
- sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
+ sdh_e.de.view.data_size = cpu_to_le16(sizeof(struct SECURITY_HDR));
sdh_e.de.view.res = 0;
sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
@@ -2231,7 +2232,7 @@ int ntfs_insert_security(struct ntfs_sb_info *sbi,
sdh_e.de.res = 0;
sdh_e.key.hash = d_security->key.hash;
sdh_e.key.sec_id = d_security->key.sec_id;
- memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
+ memcpy(&sdh_e.sec_hdr, d_security, sizeof(struct SECURITY_HDR));
sdh_e.magic[0] = cpu_to_le16('I');
sdh_e.magic[1] = cpu_to_le16('I');
@@ -2522,7 +2523,8 @@ out:
/*
* run_deallocate - Deallocate clusters.
*/
-int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
+int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
+ bool trim)
{
CLST lcn, len;
size_t idx = 0;
@@ -2578,13 +2580,13 @@ static inline bool name_has_forbidden_chars(const struct le_str *fname)
return false;
}
-static inline bool is_reserved_name(struct ntfs_sb_info *sbi,
+static inline bool is_reserved_name(const struct ntfs_sb_info *sbi,
const struct le_str *fname)
{
int port_digit;
const __le16 *name = fname->name;
int len = fname->len;
- u16 *upcase = sbi->upcase;
+ const u16 *upcase = sbi->upcase;
/* check for 3 chars reserved names (device names) */
/* name by itself or with any extension is forbidden */
@@ -2618,3 +2620,60 @@ bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
return !name_has_forbidden_chars(fname) &&
!is_reserved_name(sbi, fname);
}
+
+/*
+ * ntfs_set_label - Update the current NTFS label.
+ */
+int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len)
+{
+ int err;
+ struct ATTRIB *attr;
+ struct ntfs_inode *ni = sbi->volume.ni;
+ const u8 max_ulen = 0x80; /* TODO: use attrdef to get maximum length */
+ /* Allocate PATH_MAX bytes. */
+ struct cpu_str *uni = __getname();
+
+ if (!uni)
+ return -ENOMEM;
+
+ err = ntfs_nls_to_utf16(sbi, label, len, uni, (PATH_MAX - 2) / 2,
+ UTF16_LITTLE_ENDIAN);
+ if (err < 0)
+ goto out;
+
+ if (uni->len > max_ulen) {
+ ntfs_warn(sbi->sb, "new label is too long");
+ err = -EFBIG;
+ goto out;
+ }
+
+ ni_lock(ni);
+
+ /* Ignore any errors. */
+ ni_remove_attr(ni, ATTR_LABEL, NULL, 0, false, NULL);
+
+ err = ni_insert_resident(ni, uni->len * sizeof(u16), ATTR_LABEL, NULL,
+ 0, &attr, NULL, NULL);
+ if (err < 0)
+ goto unlock_out;
+
+	/* Write the new label into the on-disk structure. */
+ memcpy(resident_data(attr), uni->name, uni->len * sizeof(u16));
+
+	/* Update the cached copy of the current label. */
+ if (len >= ARRAY_SIZE(sbi->volume.label))
+ len = ARRAY_SIZE(sbi->volume.label) - 1;
+ memcpy(sbi->volume.label, label, len);
+ sbi->volume.label[len] = 0;
+ mark_inode_dirty_sync(&ni->vfs_inode);
+
+unlock_out:
+ ni_unlock(ni);
+
+ if (!err)
+ err = _ni_write_inode(&ni->vfs_inode, 0);
+
+out:
+ __putname(uni);
+ return err;
+}
\ No newline at end of file
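
A minimal caller sketch for the ntfs_set_label() helper added above
(hypothetical example, not part of this patch; the wrapper name is invented):

	static int example_relabel(struct ntfs_sb_info *sbi)
	{
		static u8 new_label[] = "BACKUP01";

		/* len counts the multibyte string without the trailing NUL. */
		return ntfs_set_label(sbi, new_label, sizeof(new_label) - 1);
	}
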
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index 0a48d2d67219..124c6e822623 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -432,8 +432,8 @@ next_run:
nbits = 8 * (data_size - vbo);
ok = nbits > from ?
- (*fn)((ulong *)bh->b_data, from, nbits, ret) :
- false;
+ (*fn)((ulong *)bh->b_data, from, nbits, ret) :
+ false;
put_bh(bh);
if (ok) {
@@ -1113,6 +1113,12 @@ ok:
*node = in;
out:
+ if (err == -E_NTFS_CORRUPT) {
+ ntfs_inode_err(&ni->vfs_inode, "directory corrupted");
+ ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
+ err = -EINVAL;
+ }
+
if (ib != in->index)
kfree(ib);
@@ -1676,8 +1682,8 @@ static int indx_insert_into_root(struct ntfs_index *indx, struct ntfs_inode *ni,
/* Create alloc and bitmap attributes (if not). */
err = run_is_empty(&indx->alloc_run) ?
- indx_create_allocate(indx, ni, &new_vbn) :
- indx_add_allocate(indx, ni, &new_vbn);
+ indx_create_allocate(indx, ni, &new_vbn) :
+ indx_add_allocate(indx, ni, &new_vbn);
/* Layout of record may be changed, so rescan root. */
root = indx_get_root(indx, ni, &attr, &mi);
@@ -1868,8 +1874,8 @@ indx_insert_into_buffer(struct ntfs_index *indx, struct ntfs_inode *ni,
(*indx->cmp)(new_de + 1, le16_to_cpu(new_de->key_size),
up_e + 1, le16_to_cpu(up_e->key_size),
ctx) < 0 ?
- hdr2 :
- hdr1,
+ hdr2 :
+ hdr1,
new_de, NULL, ctx);
indx_mark_used(indx, ni, new_vbn >> indx->idx2vbn_bits);
@@ -2340,7 +2346,7 @@ int indx_delete_entry(struct ntfs_index *indx, struct ntfs_inode *ni,
re, ctx,
fnd->level - 1,
fnd) :
- indx_insert_into_root(indx, ni, re, e,
+ indx_insert_into_root(indx, ni, re, e,
ctx, fnd, 0);
kfree(re);
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index 6c560245eef4..dc7e7ab701c6 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -263,7 +263,7 @@ next_attr:
goto next_attr;
run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run :
- &ni->file.run;
+ &ni->file.run;
break;
case ATTR_ROOT:
@@ -291,8 +291,8 @@ next_attr:
goto out;
mode = sb->s_root ?
- (S_IFDIR | (0777 & sbi->options->fs_dmask_inv)) :
- (S_IFDIR | 0777);
+ (S_IFDIR | (0777 & sbi->options->fs_dmask_inv)) :
+ (S_IFDIR | 0777);
goto next_attr;
case ATTR_ALLOC:
@@ -450,7 +450,7 @@ end_enum:
inode->i_op = &ntfs_file_inode_operations;
inode->i_fop = &ntfs_file_operations;
inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
- &ntfs_aops;
+ &ntfs_aops;
if (ino != MFT_REC_MFT)
init_rwsem(&ni->file.run_lock);
} else if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
@@ -787,7 +787,7 @@ static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
ret = blockdev_direct_IO(iocb, inode, iter,
wr ? ntfs_get_block_direct_IO_W :
- ntfs_get_block_direct_IO_R);
+ ntfs_get_block_direct_IO_R);
if (ret > 0)
end = vbo + ret;
@@ -1191,11 +1191,11 @@ out:
* - ntfs_symlink
* - ntfs_mkdir
* - ntfs_atomic_open
- *
+ *
* NOTE: if fnd != NULL (ntfs_atomic_open) then @dir is locked
*/
-struct inode *ntfs_create_inode(struct mnt_idmap *idmap,
- struct inode *dir, struct dentry *dentry,
+struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry,
const struct cpu_str *uni, umode_t mode,
dev_t dev, const char *symname, u32 size,
struct ntfs_fnd *fnd)
@@ -1309,7 +1309,7 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap,
if (err)
goto out2;
- ni = ntfs_new_inode(sbi, ino, fa & FILE_ATTRIBUTE_DIRECTORY);
+ ni = ntfs_new_inode(sbi, ino, S_ISDIR(mode) ? RECORD_FLAG_DIR : 0);
if (IS_ERR(ni)) {
err = PTR_ERR(ni);
ni = NULL;
@@ -1437,8 +1437,7 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap,
root = Add2Ptr(attr, sizeof(I30_NAME) + SIZEOF_RESIDENT);
memcpy(root, dir_root, offsetof(struct INDEX_ROOT, ihdr));
- root->ihdr.de_off =
- cpu_to_le32(sizeof(struct INDEX_HDR)); // 0x10
+ root->ihdr.de_off = cpu_to_le32(sizeof(struct INDEX_HDR));
root->ihdr.used = cpu_to_le32(sizeof(struct INDEX_HDR) +
sizeof(struct NTFS_DE));
root->ihdr.total = root->ihdr.used;
@@ -1605,7 +1604,7 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap,
inode->i_op = &ntfs_file_inode_operations;
inode->i_fop = &ntfs_file_operations;
inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
- &ntfs_aops;
+ &ntfs_aops;
init_rwsem(&ni->file.run_lock);
} else {
inode->i_op = &ntfs_special_inode_operations;
diff --git a/fs/ntfs3/lznt.c b/fs/ntfs3/lznt.c
index 61e161c7c567..4aae598d6d88 100644
--- a/fs/ntfs3/lznt.c
+++ b/fs/ntfs3/lznt.c
@@ -297,7 +297,7 @@ next:
struct lznt *get_lznt_ctx(int level)
{
struct lznt *r = kzalloc(level ? offsetof(struct lznt, hash) :
- sizeof(struct lznt),
+ sizeof(struct lznt),
GFP_NOFS);
if (r)
@@ -393,8 +393,8 @@ ssize_t decompress_lznt(const void *cmpr, size_t cmpr_size, void *unc,
} else {
/* This chunk does not contain compressed data. */
unc_use = unc_chunk + LZNT_CHUNK_SIZE > unc_end ?
- unc_end - unc_chunk :
- LZNT_CHUNK_SIZE;
+ unc_end - unc_chunk :
+ LZNT_CHUNK_SIZE;
if (cmpr_chunk + sizeof(chunk_hdr) + unc_use >
cmpr_end) {
diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
index 9736b1e4a0f6..70f8c859e0ad 100644
--- a/fs/ntfs3/namei.c
+++ b/fs/ntfs3/namei.c
@@ -109,8 +109,8 @@ static int ntfs_create(struct mnt_idmap *idmap, struct inode *dir,
{
struct inode *inode;
- inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFREG | mode,
- 0, NULL, 0, NULL);
+ inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFREG | mode, 0,
+ NULL, 0, NULL);
return IS_ERR(inode) ? PTR_ERR(inode) : 0;
}
@@ -125,8 +125,8 @@ static int ntfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
{
struct inode *inode;
- inode = ntfs_create_inode(idmap, dir, dentry, NULL, mode, rdev,
- NULL, 0, NULL);
+ inode = ntfs_create_inode(idmap, dir, dentry, NULL, mode, rdev, NULL, 0,
+ NULL);
return IS_ERR(inode) ? PTR_ERR(inode) : 0;
}
@@ -199,8 +199,8 @@ static int ntfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
u32 size = strlen(symname);
struct inode *inode;
- inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFLNK | 0777,
- 0, symname, size, NULL);
+ inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFLNK | 0777, 0,
+ symname, size, NULL);
return IS_ERR(inode) ? PTR_ERR(inode) : 0;
}
@@ -213,8 +213,8 @@ static int ntfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
{
struct inode *inode;
- inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFDIR | mode,
- 0, NULL, 0, NULL);
+ inode = ntfs_create_inode(idmap, dir, dentry, NULL, S_IFDIR | mode, 0,
+ NULL, 0, NULL);
return IS_ERR(inode) ? PTR_ERR(inode) : 0;
}
@@ -422,19 +422,10 @@ static int ntfs_atomic_open(struct inode *dir, struct dentry *dentry,
* fnd contains tree's path to insert to.
* If fnd is not NULL then dir is locked.
*/
-
- /*
- * Unfortunately I don't know how to get here correct 'struct nameidata *nd'
- * or 'struct mnt_idmap *idmap'.
- * See atomic_open in fs/namei.c.
- * This is why xfstest/633 failed.
- * Looks like ntfs_atomic_open must accept 'struct mnt_idmap *idmap' as argument.
- */
-
- inode = ntfs_create_inode(&nop_mnt_idmap, dir, dentry, uni, mode, 0,
- NULL, 0, fnd);
+ inode = ntfs_create_inode(mnt_idmap(file->f_path.mnt), dir, dentry, uni,
+ mode, 0, NULL, 0, fnd);
err = IS_ERR(inode) ? PTR_ERR(inode) :
- finish_open(file, dentry, ntfs_file_open);
+ finish_open(file, dentry, ntfs_file_open);
dput(d);
out2:
diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
index 90151e56c122..98b76d1b09e7 100644
--- a/fs/ntfs3/ntfs.h
+++ b/fs/ntfs3/ntfs.h
@@ -95,11 +95,10 @@ enum RECORD_NUM {
MFT_REC_BITMAP = 6,
MFT_REC_BOOT = 7,
MFT_REC_BADCLUST = 8,
- //MFT_REC_QUOTA = 9,
- MFT_REC_SECURE = 9, // NTFS 3.0
+ MFT_REC_SECURE = 9,
MFT_REC_UPCASE = 10,
- MFT_REC_EXTEND = 11, // NTFS 3.0
- MFT_REC_RESERVED = 11,
+ MFT_REC_EXTEND = 11,
+ MFT_REC_RESERVED = 12,
MFT_REC_FREE = 16,
MFT_REC_USER = 24,
};
@@ -109,7 +108,6 @@ enum ATTR_TYPE {
ATTR_STD = cpu_to_le32(0x10),
ATTR_LIST = cpu_to_le32(0x20),
ATTR_NAME = cpu_to_le32(0x30),
- // ATTR_VOLUME_VERSION on Nt4
ATTR_ID = cpu_to_le32(0x40),
ATTR_SECURE = cpu_to_le32(0x50),
ATTR_LABEL = cpu_to_le32(0x60),
@@ -118,7 +116,6 @@ enum ATTR_TYPE {
ATTR_ROOT = cpu_to_le32(0x90),
ATTR_ALLOC = cpu_to_le32(0xA0),
ATTR_BITMAP = cpu_to_le32(0xB0),
- // ATTR_SYMLINK on Nt4
ATTR_REPARSE = cpu_to_le32(0xC0),
ATTR_EA_INFO = cpu_to_le32(0xD0),
ATTR_EA = cpu_to_le32(0xE0),
@@ -144,6 +141,7 @@ enum FILE_ATTRIBUTE {
FILE_ATTRIBUTE_ENCRYPTED = cpu_to_le32(0x00004000),
FILE_ATTRIBUTE_VALID_FLAGS = cpu_to_le32(0x00007fb7),
FILE_ATTRIBUTE_DIRECTORY = cpu_to_le32(0x10000000),
+ FILE_ATTRIBUTE_INDEX = cpu_to_le32(0x20000000)
};
static_assert(sizeof(enum FILE_ATTRIBUTE) == 4);
@@ -266,7 +264,7 @@ enum RECORD_FLAG {
RECORD_FLAG_IN_USE = cpu_to_le16(0x0001),
RECORD_FLAG_DIR = cpu_to_le16(0x0002),
RECORD_FLAG_SYSTEM = cpu_to_le16(0x0004),
- RECORD_FLAG_UNKNOWN = cpu_to_le16(0x0008),
+ RECORD_FLAG_INDEX = cpu_to_le16(0x0008),
};
/* MFT Record structure. */
@@ -290,6 +288,15 @@ struct MFT_REC {
#define MFTRECORD_FIXUP_OFFSET_1 offsetof(struct MFT_REC, res)
#define MFTRECORD_FIXUP_OFFSET_3 offsetof(struct MFT_REC, fixups)
+/*
+ * Define MFTRECORD_FIXUP_OFFSET as MFTRECORD_FIXUP_OFFSET_3 (0x30)
+ * to format new MFT records with the bigger header (as current
+ * ntfs.sys does).
+ *
+ * Define MFTRECORD_FIXUP_OFFSET as MFTRECORD_FIXUP_OFFSET_1 (0x2A)
+ * to format new MFT records with the smaller header (as old ntfs.sys
+ * did). Both variants are valid.
+ */
+#define MFTRECORD_FIXUP_OFFSET MFTRECORD_FIXUP_OFFSET_1
static_assert(MFTRECORD_FIXUP_OFFSET_1 == 0x2A);
static_assert(MFTRECORD_FIXUP_OFFSET_3 == 0x30);
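
A worked example of how the choice plays out when formatting a 1 KiB record,
using the fixup arithmetic from ntfs_init_from_boot() later in this patch
(values illustrative):

	fn = (1024 >> SECTOR_SHIFT) + 1;		/* 3 fixup words */
	/* With MFTRECORD_FIXUP_OFFSET_1 (0x2A): */
	ao = ALIGN(0x2A + sizeof(short) * 3, 8);	/* attrs start at 0x30 */
	/* With MFTRECORD_FIXUP_OFFSET_3 (0x30): */
	ao = ALIGN(0x30 + sizeof(short) * 3, 8);	/* attrs start at 0x38 */
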
@@ -331,18 +338,18 @@ struct ATTR_NONRESIDENT {
__le64 svcn; // 0x10: Starting VCN of this segment.
__le64 evcn; // 0x18: End VCN of this segment.
__le16 run_off; // 0x20: Offset to packed runs.
- // Unit of Compression size for this stream, expressed
- // as a log of the cluster size.
+ // Unit of Compression size for this stream, expressed
+ // as a log of the cluster size.
//
- // 0 means file is not compressed
- // 1, 2, 3, and 4 are potentially legal values if the
- // stream is compressed, however the implementation
- // may only choose to use 4, or possibly 3. Note
- // that 4 means cluster size time 16. If convenient
- // the implementation may wish to accept a
- // reasonable range of legal values here (1-5?),
- // even if the implementation only generates
- // a smaller set of values itself.
+ // 0 means file is not compressed
+ // 1, 2, 3, and 4 are potentially legal values if the
+ // stream is compressed, however the implementation
+ // may only choose to use 4, or possibly 3.
+	// Note that 4 means cluster size times 16.
+ // If convenient the implementation may wish to accept a
+ // reasonable range of legal values here (1-5?),
+ // even if the implementation only generates
+ // a smaller set of values itself.
u8 c_unit; // 0x22:
u8 res1[5]; // 0x23:
__le64 alloc_size; // 0x28: The allocated size of attribute in bytes.
@@ -836,16 +843,22 @@ static_assert(sizeof(struct ATTR_DEF_ENTRY) == 0xa0);
/* Object ID (0x40) */
struct OBJECT_ID {
struct GUID ObjId; // 0x00: Unique Id assigned to file.
- struct GUID BirthVolumeId; // 0x10: Birth Volume Id is the Object Id of the Volume on.
- // which the Object Id was allocated. It never changes.
- struct GUID BirthObjectId; // 0x20: Birth Object Id is the first Object Id that was
- // ever assigned to this MFT Record. I.e. If the Object Id
- // is changed for some reason, this field will reflect the
- // original value of the Object Id.
- struct GUID DomainId; // 0x30: Domain Id is currently unused but it is intended to be
- // used in a network environment where the local machine is
- // part of a Windows 2000 Domain. This may be used in a Windows
- // 2000 Advanced Server managed domain.
+
+	// Birth Volume Id is the Object Id of the Volume on
+ // which the Object Id was allocated. It never changes.
+ struct GUID BirthVolumeId; //0x10:
+
+ // Birth Object Id is the first Object Id that was
+ // ever assigned to this MFT Record. I.e. If the Object Id
+ // is changed for some reason, this field will reflect the
+ // original value of the Object Id.
+ struct GUID BirthObjectId; // 0x20:
+
+ // Domain Id is currently unused but it is intended to be
+ // used in a network environment where the local machine is
+ // part of a Windows 2000 Domain. This may be used in a Windows
+ // 2000 Advanced Server managed domain.
+ struct GUID DomainId; // 0x30:
};
static_assert(sizeof(struct OBJECT_ID) == 0x40);
@@ -855,32 +868,35 @@ struct NTFS_DE_O {
struct NTFS_DE de;
struct GUID ObjId; // 0x10: Unique Id assigned to file.
struct MFT_REF ref; // 0x20: MFT record number with this file.
- struct GUID BirthVolumeId; // 0x28: Birth Volume Id is the Object Id of the Volume on
- // which the Object Id was allocated. It never changes.
- struct GUID BirthObjectId; // 0x38: Birth Object Id is the first Object Id that was
- // ever assigned to this MFT Record. I.e. If the Object Id
- // is changed for some reason, this field will reflect the
- // original value of the Object Id.
- // This field is valid if data_size == 0x48.
- struct GUID BirthDomainId; // 0x48: Domain Id is currently unused but it is intended
- // to be used in a network environment where the local
- // machine is part of a Windows 2000 Domain. This may be
- // used in a Windows 2000 Advanced Server managed domain.
+
+ // Birth Volume Id is the Object Id of the Volume on
+ // which the Object Id was allocated. It never changes.
+ struct GUID BirthVolumeId; // 0x28:
+
+ // Birth Object Id is the first Object Id that was
+ // ever assigned to this MFT Record. I.e. If the Object Id
+ // is changed for some reason, this field will reflect the
+ // original value of the Object Id.
+ // This field is valid if data_size == 0x48.
+ struct GUID BirthObjectId; // 0x38:
+
+ // Domain Id is currently unused but it is intended
+ // to be used in a network environment where the local
+ // machine is part of a Windows 2000 Domain. This may be
+ // used in a Windows 2000 Advanced Server managed domain.
+ struct GUID BirthDomainId; // 0x48:
};
static_assert(sizeof(struct NTFS_DE_O) == 0x58);
-#define NTFS_OBJECT_ENTRY_DATA_SIZE1 \
- 0x38 // struct NTFS_DE_O.BirthDomainId is not used
-#define NTFS_OBJECT_ENTRY_DATA_SIZE2 \
- 0x48 // struct NTFS_DE_O.BirthDomainId is used
-
/* Q Directory entry structure ( rule = 0x11 ) */
struct NTFS_DE_Q {
struct NTFS_DE de;
__le32 owner_id; // 0x10: Unique Id assigned to file
+
+	/* Here are 0x30 bytes of user quota. NOTE: 4-byte aligned! */
__le32 Version; // 0x14: 0x02
- __le32 flags2; // 0x18: Quota flags, see above
+ __le32 Flags; // 0x18: Quota flags, see above
__le64 BytesUsed; // 0x1C:
__le64 ChangeTime; // 0x24:
__le64 WarningLimit; // 0x2C:
@@ -888,9 +904,9 @@ struct NTFS_DE_Q {
__le64 ExceededTime; // 0x3C:
// SID is placed here
-}; // sizeof() = 0x44
+} __packed; // sizeof() = 0x44
-#define SIZEOF_NTFS_DE_Q 0x44
+static_assert(sizeof(struct NTFS_DE_Q) == 0x44);
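
The __packed annotation is what makes this static_assert hold: BytesUsed is a
__le64 at offset 0x1C, so without packing the compiler would pad it up to 0x20
and sizeof() would exceed 0x44. A quick sanity check, assuming the offsets
annotated above:

	static_assert(offsetof(struct NTFS_DE_Q, BytesUsed) == 0x1C);
	static_assert(offsetof(struct NTFS_DE_Q, ExceededTime) == 0x3C);
	static_assert(sizeof(struct NTFS_DE_Q) == 0x44);
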
#define SecurityDescriptorsBlockSize 0x40000 // 256K
#define SecurityDescriptorMaxSize 0x20000 // 128K
@@ -912,7 +928,7 @@ struct SECURITY_HDR {
*/
} __packed;
-#define SIZEOF_SECURITY_HDR 0x14
+static_assert(sizeof(struct SECURITY_HDR) == 0x14);
/* SII Directory entry structure */
struct NTFS_DE_SII {
@@ -921,7 +937,8 @@ struct NTFS_DE_SII {
struct SECURITY_HDR sec_hdr; // 0x14:
} __packed;
-#define SIZEOF_SII_DIRENTRY 0x28
+static_assert(offsetof(struct NTFS_DE_SII, sec_hdr) == 0x14);
+static_assert(sizeof(struct NTFS_DE_SII) == 0x28);
/* SDH Directory entry structure */
struct NTFS_DE_SDH {
@@ -1155,7 +1172,7 @@ struct REPARSE_DATA_BUFFER {
#define FILE_NEED_EA 0x80 // See ntifs.h
/*
- *FILE_NEED_EA, indicates that the file to which the EA belongs cannot be
+ * FILE_NEED_EA, indicates that the file to which the EA belongs cannot be
* interpreted without understanding the associated extended attributes.
*/
struct EA_INFO {
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index eb01f7e76479..629403ede6e5 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -53,6 +53,8 @@ enum utf16_endian;
#define E_NTFS_NONRESIDENT 556
/* NTFS specific error code about punch hole. */
#define E_NTFS_NOTALIGNED 557
+/* NTFS specific error code when on-disk struct is corrupted. */
+#define E_NTFS_CORRUPT 558
/* sbi->flags */
@@ -274,7 +276,7 @@ struct ntfs_sb_info {
__le16 flags; // Cached current VOLUME_INFO::flags, VOLUME_FLAG_DIRTY.
u8 major_ver;
u8 minor_ver;
- char label[65];
+ char label[256];
bool real_dirty; // Real fs state.
} volume;
@@ -284,7 +286,6 @@ struct ntfs_sb_info {
struct ntfs_inode *ni;
u32 next_id;
u64 next_off;
-
__le32 def_security_id;
} security;
@@ -312,6 +313,7 @@ struct ntfs_sb_info {
struct ntfs_mount_options *options;
struct ratelimit_state msg_ratelimit;
+ struct proc_dir_entry *procdir;
};
/* One MFT record(usually 1024 bytes), consists of attributes. */
@@ -465,8 +467,7 @@ int al_add_le(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name,
struct ATTR_LIST_ENTRY **new_le);
bool al_remove_le(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le);
bool al_delete_le(struct ntfs_inode *ni, enum ATTR_TYPE type, CLST vcn,
- const __le16 *name, size_t name_len,
- const struct MFT_REF *ref);
+ const __le16 *name, u8 name_len, const struct MFT_REF *ref);
int al_update(struct ntfs_inode *ni, int sync);
static inline size_t al_aligned(size_t size)
{
@@ -525,7 +526,7 @@ struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
int ni_load_all_mi(struct ntfs_inode *ni);
bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi);
int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
- const __le16 *name, size_t name_len, bool base_only,
+ const __le16 *name, u8 name_len, bool base_only,
const __le16 *id);
int ni_create_attr_list(struct ntfs_inode *ni);
int ni_expand_list(struct ntfs_inode *ni);
@@ -542,7 +543,7 @@ void ni_remove_attr_le(struct ntfs_inode *ni, struct ATTRIB *attr,
struct mft_inode *mi, struct ATTR_LIST_ENTRY *le);
int ni_delete_all(struct ntfs_inode *ni);
struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
- const struct cpu_str *uni,
+ const struct le_str *uni,
const struct MFT_REF *home,
struct mft_inode **mi,
struct ATTR_LIST_ENTRY **entry);
@@ -629,7 +630,7 @@ int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run);
int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
u64 vbo, u64 *lbo, u64 *bytes);
struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST nRec,
- bool dir);
+ enum RECORD_FLAG flag);
extern const u8 s_default_security[0x50];
bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len);
int ntfs_security_init(struct ntfs_sb_info *sbi);
@@ -647,8 +648,10 @@ int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
const struct MFT_REF *ref);
void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim);
-int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim);
+int run_deallocate(struct ntfs_sb_info *sbi, const struct runs_tree *run,
+ bool trim);
bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *name);
+int ntfs_set_label(struct ntfs_sb_info *sbi, u8 *label, int len);
/* Globals from index.c */
int indx_used_bit(struct ntfs_index *indx, struct ntfs_inode *ni, size_t *bit);
@@ -706,8 +709,8 @@ int ntfs_sync_inode(struct inode *inode);
int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
struct inode *i2);
int inode_write_data(struct inode *inode, const void *data, size_t bytes);
-struct inode *ntfs_create_inode(struct mnt_idmap *idmap,
- struct inode *dir, struct dentry *dentry,
+struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry,
const struct cpu_str *uni, umode_t mode,
dev_t dev, const char *symname, u32 size,
struct ntfs_fnd *fnd);
@@ -736,7 +739,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr);
// TODO: id?
struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
enum ATTR_TYPE type, const __le16 *name,
- size_t name_len, const __le16 *id);
+ u8 name_len, const __le16 *id);
static inline struct ATTRIB *rec_find_attr_le(struct mft_inode *rec,
struct ATTR_LIST_ENTRY *le)
{
@@ -856,12 +859,12 @@ unsigned long ntfs_names_hash(const u16 *name, size_t len, const u16 *upcase,
/* globals from xattr.c */
#ifdef CONFIG_NTFS3_FS_POSIX_ACL
-struct posix_acl *ntfs_get_acl(struct mnt_idmap *idmap,
- struct dentry *dentry, int type);
+struct posix_acl *ntfs_get_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ int type);
int ntfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type);
int ntfs_init_acl(struct mnt_idmap *idmap, struct inode *inode,
- struct inode *dir);
+ struct inode *dir);
#else
#define ntfs_get_acl NULL
#define ntfs_set_acl NULL
diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
index 2a281cead2bc..c12ebffc94da 100644
--- a/fs/ntfs3/record.c
+++ b/fs/ntfs3/record.c
@@ -124,7 +124,7 @@ int mi_read(struct mft_inode *mi, bool is_mft)
struct rw_semaphore *rw_lock = NULL;
if (is_mounted(sbi)) {
- if (!is_mft) {
+ if (!is_mft && mft_ni) {
rw_lock = &mft_ni->file.run_lock;
down_read(rw_lock);
}
@@ -148,7 +148,7 @@ int mi_read(struct mft_inode *mi, bool is_mft)
ni_lock(mft_ni);
down_write(rw_lock);
}
- err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, &mft_ni->file.run,
+ err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, run,
vbo >> sbi->cluster_bits);
if (rw_lock) {
up_write(rw_lock);
@@ -180,6 +180,12 @@ ok:
return 0;
out:
+ if (err == -E_NTFS_CORRUPT) {
+ ntfs_err(sbi->sb, "mft corrupted");
+ ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ err = -EINVAL;
+ }
+
return err;
}
@@ -296,7 +302,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
*/
struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
enum ATTR_TYPE type, const __le16 *name,
- size_t name_len, const __le16 *id)
+ u8 name_len, const __le16 *id)
{
u32 type_in = le32_to_cpu(type);
u32 atype;
@@ -382,6 +388,8 @@ int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
rec->seq = cpu_to_le16(seq);
rec->flags = RECORD_FLAG_IN_USE | flags;
+ if (MFTRECORD_FIXUP_OFFSET == MFTRECORD_FIXUP_OFFSET_3)
+ rec->mft_record = cpu_to_le32(rno);
mi->dirty = true;
diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c
index 47612d16c027..cb8cf0161177 100644
--- a/fs/ntfs3/run.c
+++ b/fs/ntfs3/run.c
@@ -434,8 +434,8 @@ requires_new_range:
if (should_add_tail) {
tail_lcn = r->lcn == SPARSE_LCN ?
- SPARSE_LCN :
- (r->lcn + Tovcn);
+ SPARSE_LCN :
+ (r->lcn + Tovcn);
tail_vcn = r->vcn + Tovcn;
tail_len = r->len - Tovcn;
}
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 5158dd31fd97..e36769eac7de 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -57,6 +57,7 @@
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/nls.h>
+#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/statfs.h>
@@ -116,8 +117,8 @@ void ntfs_inode_printk(struct inode *inode, const char *fmt, ...)
/* Use static allocated buffer, if possible. */
name = atomic_dec_and_test(&s_name_buf_cnt) ?
- s_name_buf :
- kmalloc(sizeof(s_name_buf), GFP_NOFS);
+ s_name_buf :
+ kmalloc(sizeof(s_name_buf), GFP_NOFS);
if (name) {
struct dentry *de = d_find_alias(inode);
@@ -257,6 +258,7 @@ enum Opt {
Opt_err,
};
+// clang-format off
static const struct fs_parameter_spec ntfs_fs_parameters[] = {
fsparam_u32("uid", Opt_uid),
fsparam_u32("gid", Opt_gid),
@@ -277,9 +279,13 @@ static const struct fs_parameter_spec ntfs_fs_parameters[] = {
fsparam_flag_no("nocase", Opt_nocase),
{}
};
+// clang-format on
/*
* Load nls table or if @nls is utf8 then return NULL.
+ *
+ * It would be good to use "const char *nls" here,
+ * but load_nls() accepts "char *".
*/
static struct nls_table *ntfs_load_nls(char *nls)
{
@@ -436,6 +442,103 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
return 0;
}
+#ifdef CONFIG_PROC_FS
+static struct proc_dir_entry *proc_info_root;
+
+/*
+ * ntfs3_volinfo:
+ *
+ * The content of /proc/fs/ntfs3/<dev>/volinfo
+ *
+ * ntfs3.1
+ * cluster size
+ * number of clusters
+ */
+static int ntfs3_volinfo(struct seq_file *m, void *o)
+{
+ struct super_block *sb = m->private;
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+ seq_printf(m, "ntfs%d.%d\n%u\n%zu\n", sbi->volume.major_ver,
+ sbi->volume.minor_ver, sbi->cluster_size,
+ sbi->used.bitmap.nbits);
+
+ return 0;
+}
+
+static int ntfs3_volinfo_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ntfs3_volinfo, pde_data(inode));
+}
+
+/* read /proc/fs/ntfs3/<dev>/label */
+static int ntfs3_label_show(struct seq_file *m, void *o)
+{
+ struct super_block *sb = m->private;
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+ seq_printf(m, "%s\n", sbi->volume.label);
+
+ return 0;
+}
+
+/* write /proc/fs/ntfs3/<dev>/label */
+static ssize_t ntfs3_label_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int err;
+ struct super_block *sb = pde_data(file_inode(file));
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+ ssize_t ret = count;
+ u8 *label = kmalloc(count, GFP_NOFS);
+
+ if (!label)
+ return -ENOMEM;
+
+ if (copy_from_user(label, buffer, ret)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ while (ret > 0 && label[ret - 1] == '\n')
+ ret -= 1;
+
+ err = ntfs_set_label(sbi, label, ret);
+
+ if (err < 0) {
+ ntfs_err(sb, "failed (%d) to write label", err);
+ ret = err;
+ goto out;
+ }
+
+ *ppos += count;
+ ret = count;
+out:
+ kfree(label);
+ return ret;
+}
+
+static int ntfs3_label_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ntfs3_label_show, pde_data(inode));
+}
+
+static const struct proc_ops ntfs3_volinfo_fops = {
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_open = ntfs3_volinfo_open,
+};
+
+static const struct proc_ops ntfs3_label_fops = {
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_open = ntfs3_label_open,
+ .proc_write = ntfs3_label_write,
+};
+
+#endif
+
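
A minimal user-space sketch of how the new proc files could be exercised once
a volume is mounted (hypothetical; the device name sdb1 and the label text are
assumptions, not part of this patch):

	#include <stdio.h>

	int main(void)
	{
		char buf[256];
		/* volinfo: version, cluster size, number of clusters. */
		FILE *f = fopen("/proc/fs/ntfs3/sdb1/volinfo", "r");

		if (!f)
			return 1;
		while (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout);
		fclose(f);

		/* label is writable; a trailing newline is stripped. */
		f = fopen("/proc/fs/ntfs3/sdb1/label", "w");
		if (!f)
			return 1;
		fputs("MYDISK\n", f);
		fclose(f);
		return 0;
	}
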
static struct kmem_cache *ntfs_inode_cachep;
static struct inode *ntfs_alloc_inode(struct super_block *sb)
@@ -510,6 +613,16 @@ static void ntfs_put_super(struct super_block *sb)
{
struct ntfs_sb_info *sbi = sb->s_fs_info;
+#ifdef CONFIG_PROC_FS
+ // Remove /proc/fs/ntfs3/..
+ if (sbi->procdir) {
+ remove_proc_entry("label", sbi->procdir);
+ remove_proc_entry("volinfo", sbi->procdir);
+ remove_proc_entry(sb->s_id, proc_info_root);
+ sbi->procdir = NULL;
+ }
+#endif
+
/* Mark rw ntfs as clear, if possible. */
ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
@@ -711,9 +824,16 @@ static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot)
/*
* ntfs_init_from_boot - Init internal info from on-disk boot sector.
+ *
+ * An NTFS mount begins with the boot sector - a specially formatted
+ * 512 bytes. There are two copies: the first and the last 512 bytes
+ * of the volume. The boot sector's content does not change during
+ * the life of the filesystem.
+ *
+ * NOTE: ntfs.sys checks only the first (primary) boot sector;
+ * chkdsk checks both.
*/
static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
- u64 dev_size)
+ u64 dev_size, struct NTFS_BOOT **boot2)
{
struct ntfs_sb_info *sbi = sb->s_fs_info;
int err;
@@ -724,6 +844,8 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
struct MFT_REC *rec;
u16 fn, ao;
u8 cluster_bits;
+ u32 boot_off = 0;
+ const char *hint = "Primary boot";
sbi->volume.blocks = dev_size >> PAGE_SHIFT;
@@ -731,11 +853,12 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
if (!bh)
return -EIO;
+check_boot:
err = -EINVAL;
- boot = (struct NTFS_BOOT *)bh->b_data;
+ boot = (struct NTFS_BOOT *)Add2Ptr(bh->b_data, boot_off);
if (memcmp(boot->system_id, "NTFS ", sizeof("NTFS ") - 1)) {
- ntfs_err(sb, "Boot's signature is not NTFS.");
+ ntfs_err(sb, "%s signature is not NTFS.", hint);
goto out;
}
@@ -748,14 +871,16 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
boot->bytes_per_sector[0];
if (boot_sector_size < SECTOR_SIZE ||
!is_power_of_2(boot_sector_size)) {
- ntfs_err(sb, "Invalid bytes per sector %u.", boot_sector_size);
+ ntfs_err(sb, "%s: invalid bytes per sector %u.", hint,
+ boot_sector_size);
goto out;
}
/* cluster size: 512, 1K, 2K, 4K, ... 2M */
sct_per_clst = true_sectors_per_clst(boot);
if ((int)sct_per_clst < 0 || !is_power_of_2(sct_per_clst)) {
- ntfs_err(sb, "Invalid sectors per cluster %u.", sct_per_clst);
+ ntfs_err(sb, "%s: invalid sectors per cluster %u.", hint,
+ sct_per_clst);
goto out;
}
@@ -771,20 +896,20 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
if (mlcn * sct_per_clst >= sectors || mlcn2 * sct_per_clst >= sectors) {
ntfs_err(
sb,
- "Start of MFT 0x%llx (0x%llx) is out of volume 0x%llx.",
- mlcn, mlcn2, sectors);
+ "%s: start of MFT 0x%llx (0x%llx) is out of volume 0x%llx.",
+ hint, mlcn, mlcn2, sectors);
goto out;
}
sbi->record_size = record_size =
boot->record_size < 0 ? 1 << (-boot->record_size) :
- (u32)boot->record_size << cluster_bits;
+ (u32)boot->record_size << cluster_bits;
sbi->record_bits = blksize_bits(record_size);
sbi->attr_size_tr = (5 * record_size >> 4); // ~320 bytes
/* Check MFT record size. */
if (record_size < SECTOR_SIZE || !is_power_of_2(record_size)) {
- ntfs_err(sb, "Invalid bytes per MFT record %u (%d).",
+ ntfs_err(sb, "%s: invalid bytes per MFT record %u (%d).", hint,
record_size, boot->record_size);
goto out;
}
@@ -796,18 +921,18 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
}
sbi->index_size = boot->index_size < 0 ?
- 1u << (-boot->index_size) :
- (u32)boot->index_size << cluster_bits;
+ 1u << (-boot->index_size) :
+ (u32)boot->index_size << cluster_bits;
/* Check index record size. */
if (sbi->index_size < SECTOR_SIZE || !is_power_of_2(sbi->index_size)) {
- ntfs_err(sb, "Invalid bytes per index %u(%d).", sbi->index_size,
- boot->index_size);
+ ntfs_err(sb, "%s: invalid bytes per index %u(%d).", hint,
+ sbi->index_size, boot->index_size);
goto out;
}
if (sbi->index_size > MAXIMUM_BYTES_PER_INDEX) {
- ntfs_err(sb, "Unsupported bytes per index %u.",
+ ntfs_err(sb, "%s: unsupported bytes per index %u.", hint,
sbi->index_size);
goto out;
}
@@ -834,7 +959,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
/* Compare boot's cluster and sector. */
if (sbi->cluster_size < boot_sector_size) {
- ntfs_err(sb, "Invalid bytes per cluster (%u).",
+ ntfs_err(sb, "%s: invalid bytes per cluster (%u).", hint,
sbi->cluster_size);
goto out;
}
@@ -850,7 +975,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
}
sbi->max_bytes_per_attr =
- record_size - ALIGN(MFTRECORD_FIXUP_OFFSET_1, 8) -
+ record_size - ALIGN(MFTRECORD_FIXUP_OFFSET, 8) -
ALIGN(((record_size >> SECTOR_SHIFT) * sizeof(short)), 8) -
ALIGN(sizeof(enum ATTR_TYPE), 8);
@@ -892,10 +1017,10 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
sbi->new_rec = rec;
rec->rhdr.sign = NTFS_FILE_SIGNATURE;
- rec->rhdr.fix_off = cpu_to_le16(MFTRECORD_FIXUP_OFFSET_1);
+ rec->rhdr.fix_off = cpu_to_le16(MFTRECORD_FIXUP_OFFSET);
fn = (sbi->record_size >> SECTOR_SHIFT) + 1;
rec->rhdr.fix_num = cpu_to_le16(fn);
- ao = ALIGN(MFTRECORD_FIXUP_OFFSET_1 + sizeof(short) * fn, 8);
+ ao = ALIGN(MFTRECORD_FIXUP_OFFSET + sizeof(short) * fn, 8);
rec->attr_off = cpu_to_le16(ao);
rec->used = cpu_to_le32(ao + ALIGN(sizeof(enum ATTR_TYPE), 8));
rec->total = cpu_to_le32(sbi->record_size);
@@ -930,7 +1055,34 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
err = 0;
+ if (bh->b_blocknr && !sb_rdonly(sb)) {
+ /*
+		 * The alternative boot sector is valid but the primary one is not.
+		 * Do not update the primary boot here, because it may be a faked
+		 * boot sector. Let the volume mount and update the boot later.
+ */
+ *boot2 = kmemdup(boot, sizeof(*boot), GFP_NOFS | __GFP_NOWARN);
+ }
+
out:
+ if (err == -EINVAL && !bh->b_blocknr && dev_size > PAGE_SHIFT) {
+ u32 block_size = min_t(u32, sector_size, PAGE_SIZE);
+ u64 lbo = dev_size - sizeof(*boot);
+
+ /*
+ * Try alternative boot (last sector)
+ */
+ brelse(bh);
+
+ sb_set_blocksize(sb, block_size);
+ bh = ntfs_bread(sb, lbo >> blksize_bits(block_size));
+ if (!bh)
+ return -EINVAL;
+
+ boot_off = lbo & (block_size - 1);
+ hint = "Alternative boot";
+ goto check_boot;
+ }
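
A worked example of the fallback arithmetic (illustrative numbers, assuming a
1 GiB device with 4096-byte logical sectors): lbo = 0x40000000 - 512 =
0x3FFFFE00, so the alternative boot is read from block lbo >> 12 = 0x3FFFF,
and boot_off = lbo & 0xFFF = 0xE00 locates it within that block.
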
brelse(bh);
return err;
@@ -955,6 +1107,8 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
struct ATTR_DEF_ENTRY *t;
u16 *shared;
struct MFT_REF ref;
+ bool ro = sb_rdonly(sb);
+ struct NTFS_BOOT *boot2 = NULL;
ref.high = 0;
@@ -985,7 +1139,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
/* Parse boot. */
err = ntfs_init_from_boot(sb, bdev_logical_block_size(bdev),
- bdev_nr_bytes(bdev));
+ bdev_nr_bytes(bdev), &boot2);
if (err)
goto out;
@@ -1035,6 +1189,10 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
sbi->volume.minor_ver = info->minor_ver;
sbi->volume.flags = info->flags;
sbi->volume.ni = ni;
+ if (info->flags & VOLUME_FLAG_DIRTY) {
+ sbi->volume.real_dirty = true;
+ ntfs_info(sb, "It is recommened to use chkdsk.");
+ }
/* Load $MFTMirr to estimate recs_mirr. */
ref.low = cpu_to_le32(MFT_REC_MIRR);
@@ -1069,21 +1227,16 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
iput(inode);
- if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
- if (!sb_rdonly(sb)) {
- ntfs_warn(sb,
- "failed to replay log file. Can't mount rw!");
- err = -EINVAL;
- goto out;
- }
- } else if (sbi->volume.flags & VOLUME_FLAG_DIRTY) {
- if (!sb_rdonly(sb) && !options->force) {
- ntfs_warn(
- sb,
- "volume is dirty and \"force\" flag is not set!");
- err = -EINVAL;
- goto out;
- }
+ if ((sbi->flags & NTFS_FLAGS_NEED_REPLAY) && !ro) {
+ ntfs_warn(sb, "failed to replay log file. Can't mount rw!");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if ((sbi->volume.flags & VOLUME_FLAG_DIRTY) && !ro && !options->force) {
+ ntfs_warn(sb, "volume is dirty and \"force\" flag is not set!");
+ err = -EINVAL;
+ goto out;
}
/* Load $MFT. */
@@ -1173,7 +1326,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
bad_len += len;
bad_frags += 1;
- if (sb_rdonly(sb))
+ if (ro)
continue;
if (wnd_set_used_safe(&sbi->used.bitmap, lcn, len, &tt) || tt) {
@@ -1368,6 +1521,43 @@ load_root:
goto put_inode_out;
}
+ if (boot2) {
+ /*
+		 * The alternative boot sector is valid but the primary one is not.
+		 * The volume is recognized as NTFS; update the primary boot.
+ */
+ struct buffer_head *bh0 = sb_getblk(sb, 0);
+ if (bh0) {
+ if (buffer_locked(bh0))
+ __wait_on_buffer(bh0);
+
+ lock_buffer(bh0);
+ memcpy(bh0->b_data, boot2, sizeof(*boot2));
+ set_buffer_uptodate(bh0);
+ mark_buffer_dirty(bh0);
+ unlock_buffer(bh0);
+ if (!sync_dirty_buffer(bh0))
+ ntfs_warn(sb, "primary boot is updated");
+ put_bh(bh0);
+ }
+
+ kfree(boot2);
+ }
+
+#ifdef CONFIG_PROC_FS
+ /* Create /proc/fs/ntfs3/.. */
+ if (proc_info_root) {
+ struct proc_dir_entry *e = proc_mkdir(sb->s_id, proc_info_root);
+ if (e) {
+ proc_create_data("volinfo", S_IFREG | S_IRUGO, e,
+ &ntfs3_volinfo_fops, sb);
+ proc_create_data("label", S_IFREG | S_IRUGO | S_IWUGO,
+ e, &ntfs3_label_fops, sb);
+ sbi->procdir = e;
+ }
+ }
+#endif
+
return 0;
put_inode_out:
@@ -1380,6 +1570,7 @@ out:
put_mount_options(sbi->options);
put_ntfs(sbi);
sb->s_fs_info = NULL;
+ kfree(boot2);
return err;
}
@@ -1473,12 +1664,14 @@ static void ntfs_fs_free(struct fs_context *fc)
put_mount_options(opts);
}
+// clang-format off
static const struct fs_context_operations ntfs_context_ops = {
.parse_param = ntfs_fs_parse_param,
.get_tree = ntfs_fs_get_tree,
.reconfigure = ntfs_fs_reconfigure,
.free = ntfs_fs_free,
};
+// clang-format on
/*
* ntfs_init_fs_context - Initialize sbi and opts
@@ -1559,6 +1752,12 @@ static int __init init_ntfs_fs(void)
if (IS_ENABLED(CONFIG_NTFS3_LZX_XPRESS))
pr_info("ntfs3: Read-only LZX/Xpress compression included\n");
+
+#ifdef CONFIG_PROC_FS
+ /* Create "/proc/fs/ntfs3" */
+ proc_info_root = proc_mkdir("fs/ntfs3", NULL);
+#endif
+
err = ntfs3_init_bitmap();
if (err)
return err;
@@ -1590,6 +1789,12 @@ static void __exit exit_ntfs_fs(void)
kmem_cache_destroy(ntfs_inode_cachep);
unregister_filesystem(&ntfs_fs_type);
ntfs3_exit_bitmap();
+
+#ifdef CONFIG_PROC_FS
+ if (proc_info_root)
+ remove_proc_entry("fs/ntfs3", NULL);
+#endif
+
}
MODULE_LICENSE("GPL");
diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
index c3de60a4543f..023f314e8950 100644
--- a/fs/ntfs3/xattr.c
+++ b/fs/ntfs3/xattr.c
@@ -24,7 +24,7 @@
static inline size_t unpacked_ea_size(const struct EA_FULL *ea)
{
return ea->size ? le32_to_cpu(ea->size) :
- ALIGN(struct_size(ea, name,
+ ALIGN(struct_size(ea, name,
1 + ea->name_len +
le16_to_cpu(ea->elength)),
4);
@@ -141,6 +141,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
memset(Add2Ptr(ea_p, size), 0, add_bytes);
+ err = -EINVAL;
/* Check all attributes for consistency. */
for (off = 0; off < size; off += ea_size) {
const struct EA_FULL *ef = Add2Ptr(ea_p, off);
@@ -214,6 +215,9 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
ea = Add2Ptr(ea_all, off);
ea_size = unpacked_ea_size(ea);
+ if (!ea->name_len)
+ break;
+
if (buffer) {
if (ret + ea->name_len + 1 > bytes_per_buffer) {
err = -ERANGE;
@@ -524,8 +528,8 @@ out:
/*
* ntfs_get_acl - inode_operations::get_acl
*/
-struct posix_acl *ntfs_get_acl(struct mnt_idmap *idmap,
- struct dentry *dentry, int type)
+struct posix_acl *ntfs_get_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ int type)
{
struct inode *inode = d_inode(dentry);
struct ntfs_inode *ni = ntfs_i(inode);
@@ -592,8 +596,7 @@ static noinline int ntfs_set_acl_ex(struct mnt_idmap *idmap,
case ACL_TYPE_ACCESS:
/* Do not change i_mode if we are in init_acl */
if (acl && !init_acl) {
- err = posix_acl_update_mode(idmap, inode, &mode,
- &acl);
+ err = posix_acl_update_mode(idmap, inode, &mode, &acl);
if (err)
return err;
}
@@ -816,10 +819,9 @@ out:
* ntfs_setxattr - inode_operations::setxattr
*/
static noinline int ntfs_setxattr(const struct xattr_handler *handler,
- struct mnt_idmap *idmap,
- struct dentry *de, struct inode *inode,
- const char *name, const void *value,
- size_t size, int flags)
+ struct mnt_idmap *idmap, struct dentry *de,
+ struct inode *inode, const char *name,
+ const void *value, size_t size, int flags)
{
int err = -EINVAL;
struct ntfs_inode *ni = ntfs_i(inode);
diff --git a/fs/pipe.c b/fs/pipe.c
index ceb17d2dfa19..b7e380952fca 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -108,6 +108,16 @@ static inline void __pipe_unlock(struct pipe_inode_info *pipe)
mutex_unlock(&pipe->mutex);
}
+static inline bool __pipe_trylock(struct pipe_inode_info *pipe, bool nonblock)
+{
+ if (!nonblock) {
+ __pipe_lock(pipe);
+ return true;
+ }
+
+ return mutex_trylock(&pipe->mutex);
+}
+
void pipe_double_lock(struct pipe_inode_info *pipe1,
struct pipe_inode_info *pipe2)
{
@@ -234,6 +244,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
bool was_full, wake_next_reader = false;
+ const bool nonblock = iocb->ki_flags & IOCB_NOWAIT;
ssize_t ret;
/* Null read succeeds. */
@@ -241,7 +252,8 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
return 0;
ret = 0;
- __pipe_lock(pipe);
+ if (!__pipe_trylock(pipe, nonblock))
+ return -EAGAIN;
/*
* We only wake up writers if the pipe was full when we started
@@ -297,7 +309,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
chars = total_len;
}
- error = pipe_buf_confirm(pipe, buf);
+ error = pipe_buf_confirm(pipe, buf, nonblock);
if (error) {
if (!ret)
ret = error;
@@ -342,7 +354,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
break;
if (ret)
break;
- if (filp->f_flags & O_NONBLOCK) {
+ if (filp->f_flags & O_NONBLOCK || nonblock) {
ret = -EAGAIN;
break;
}
@@ -423,12 +435,14 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
ssize_t chars;
bool was_empty = false;
bool wake_next_writer = false;
+ const bool nonblock = iocb->ki_flags & IOCB_NOWAIT;
/* Null write succeeds. */
if (unlikely(total_len == 0))
return 0;
- __pipe_lock(pipe);
+ if (!__pipe_trylock(pipe, nonblock))
+ return -EAGAIN;
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
@@ -461,7 +475,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
offset + chars <= PAGE_SIZE) {
- ret = pipe_buf_confirm(pipe, buf);
+ ret = pipe_buf_confirm(pipe, buf, nonblock);
if (ret)
goto out;
@@ -493,9 +507,18 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
int copied;
if (!page) {
- page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
+ gfp_t gfp = __GFP_HIGHMEM | __GFP_ACCOUNT |
+ __GFP_HARDWALL;
+ int this_ret = -EAGAIN;
+
+ if (!nonblock) {
+ this_ret = -ENOMEM;
+ gfp |= GFP_USER;
+ }
+ page = alloc_page(gfp);
if (unlikely(!page)) {
- ret = ret ? : -ENOMEM;
+ if (!ret)
+ ret = this_ret;
break;
}
pipe->tmp_page = page;
@@ -547,7 +570,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
continue;
/* Wait for buffer space to become available. */
- if (filp->f_flags & O_NONBLOCK) {
+ if (filp->f_flags & O_NONBLOCK || nonblock) {
if (!ret)
ret = -EAGAIN;
break;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index d35bbf35a874..2c2efbe685d8 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -431,6 +431,11 @@ static inline void task_untag_mask(struct seq_file *m, struct mm_struct *mm)
seq_printf(m, "untag_mask:\t%#lx\n", mm_untag_mask(mm));
}
+__weak void arch_proc_pid_thread_features(struct seq_file *m,
+ struct task_struct *task)
+{
+}
+
int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
@@ -455,6 +460,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
task_cpus_allowed(m, task);
cpuset_task_status_allowed(m, task);
task_context_switch_counts(m, task);
+ arch_proc_pid_thread_features(m, task);
return 0;
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 420510f6a545..38b19a757281 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -711,6 +711,9 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
[ilog2(VM_UFFD_MINOR)] = "ui",
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+ [ilog2(VM_SHADOW_STACK)] = "ss",
+#endif
};
size_t i;
diff --git a/fs/splice.c b/fs/splice.c
index 3e06611d19ae..c24a69ef85be 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -117,13 +117,16 @@ static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
* is a page cache page, IO may be in flight.
*/
static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
+ struct pipe_buffer *buf, bool nonblock)
{
struct page *page = buf->page;
int err;
if (!PageUptodate(page)) {
- lock_page(page);
+ if (nonblock && !trylock_page(page))
+ return -EAGAIN;
+ else
+ lock_page(page);
/*
* Page got truncated/unhashed. This will cause a 0-byte
@@ -515,7 +518,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
if (sd->len > sd->total_len)
sd->len = sd->total_len;
- ret = pipe_buf_confirm(pipe, buf);
+ ret = pipe_buf_confirm(pipe, buf, false);
if (unlikely(ret)) {
if (ret == -ENODATA)
ret = 0;
@@ -778,7 +781,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
continue;
this_len = min(this_len, left);
- ret = pipe_buf_confirm(pipe, buf);
+ ret = pipe_buf_confirm(pipe, buf, false);
if (unlikely(ret)) {
if (ret == -ENODATA)
ret = 0;
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 1b078bbbf225..9b373a0c7aaf 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -495,10 +495,12 @@ xfs_freesp_init_recs(
ASSERT(start >= mp->m_ag_prealloc_blocks);
if (start != mp->m_ag_prealloc_blocks) {
/*
- * Modify first record to pad stripe align of log
+ * Modify first record to pad stripe align of log and
+ * bump the record count.
*/
arec->ar_blockcount = cpu_to_be32(start -
mp->m_ag_prealloc_blocks);
+ be16_add_cpu(&block->bb_numrecs, 1);
nrec = arec + 1;
/*
@@ -509,7 +511,6 @@ xfs_freesp_init_recs(
be32_to_cpu(arec->ar_startblock) +
be32_to_cpu(arec->ar_blockcount));
arec = nrec;
- be16_add_cpu(&block->bb_numrecs, 1);
}
/*
* Change record start to after the internal log
@@ -518,15 +519,13 @@ xfs_freesp_init_recs(
}
/*
- * Calculate the record block count and check for the case where
- * the log might have consumed all available space in the AG. If
- * so, reset the record count to 0 to avoid exposure of an invalid
- * record start block.
+ * Calculate the block count of this record; if it is nonzero,
+ * increment the record count.
*/
arec->ar_blockcount = cpu_to_be32(id->agsize -
be32_to_cpu(arec->ar_startblock));
- if (!arec->ar_blockcount)
- block->bb_numrecs = 0;
+ if (arec->ar_blockcount)
+ be16_add_cpu(&block->bb_numrecs, 1);
}
/*
@@ -538,7 +537,7 @@ xfs_bnoroot_init(
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
- xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 1, id->agno);
+ xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 0, id->agno);
xfs_freesp_init_recs(mp, bp, id);
}
@@ -548,7 +547,7 @@ xfs_cntroot_init(
struct xfs_buf *bp,
struct aghdr_init_data *id)
{
- xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 1, id->agno);
+ xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 0, id->agno);
xfs_freesp_init_recs(mp, bp, id);
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index b512de0540d5..cd8870a16fd1 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3494,8 +3494,10 @@ xfs_bmap_btalloc_at_eof(
if (!caller_pag)
args->pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ap->blkno));
error = xfs_alloc_vextent_exact_bno(args, ap->blkno);
- if (!caller_pag)
+ if (!caller_pag) {
xfs_perag_put(args->pag);
+ args->pag = NULL;
+ }
if (error)
return error;
@@ -3505,7 +3507,6 @@ xfs_bmap_btalloc_at_eof(
* Exact allocation failed. Reset to try an aligned allocation
* according to the original allocation specification.
*/
- args->pag = NULL;
args->alignment = stripe_align;
args->minlen = nextminlen;
args->minalignslop = 0;
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index 87ab9f95a487..69bc89d0fc68 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -42,12 +42,12 @@ xchk_setup_inode_bmap(
xfs_ilock(sc->ip, XFS_IOLOCK_EXCL);
/*
- * We don't want any ephemeral data fork updates sitting around
+ * We don't want any ephemeral data/cow fork updates sitting around
* while we inspect block mappings, so wait for directio to finish
* and flush dirty data if we have delalloc reservations.
*/
if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
- sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
+ sc->sm->sm_type != XFS_SCRUB_TYPE_BMBTA) {
struct address_space *mapping = VFS_I(sc->ip)->i_mapping;
sc->ilock_flags |= XFS_MMAPLOCK_EXCL;
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index 9aa79665c608..7a20256be969 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -1164,32 +1164,6 @@ xchk_metadata_inode_forks(
return 0;
}
-/* Pause background reaping of resources. */
-void
-xchk_stop_reaping(
- struct xfs_scrub *sc)
-{
- sc->flags |= XCHK_REAPING_DISABLED;
- xfs_blockgc_stop(sc->mp);
- xfs_inodegc_stop(sc->mp);
-}
-
-/* Restart background reaping of resources. */
-void
-xchk_start_reaping(
- struct xfs_scrub *sc)
-{
- /*
- * Readonly filesystems do not perform inactivation or speculative
- * preallocation, so there's no need to restart the workers.
- */
- if (!xfs_is_readonly(sc->mp)) {
- xfs_inodegc_start(sc->mp);
- xfs_blockgc_start(sc->mp);
- }
- sc->flags &= ~XCHK_REAPING_DISABLED;
-}
-
/*
* Enable filesystem hooks (i.e. runtime code patching) before starting a scrub
* operation. Callers must not hold any locks that intersect with the CPU
diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
index 18b5f2b62f13..791235cd9b00 100644
--- a/fs/xfs/scrub/common.h
+++ b/fs/xfs/scrub/common.h
@@ -156,8 +156,6 @@ static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
}
int xchk_metadata_inode_forks(struct xfs_scrub *sc);
-void xchk_stop_reaping(struct xfs_scrub *sc);
-void xchk_start_reaping(struct xfs_scrub *sc);
/*
* Setting up a hook to wait for intents to drain is costly -- we have to take
diff --git a/fs/xfs/scrub/fscounters.c b/fs/xfs/scrub/fscounters.c
index faa315be7978..e382a35e98d8 100644
--- a/fs/xfs/scrub/fscounters.c
+++ b/fs/xfs/scrub/fscounters.c
@@ -150,13 +150,6 @@ xchk_setup_fscounters(
if (error)
return error;
- /*
- * Pause background reclaim while we're scrubbing to reduce the
- * likelihood of background perturbations to the counters throwing off
- * our calculations.
- */
- xchk_stop_reaping(sc);
-
return xchk_trans_alloc(sc, 0);
}
@@ -454,6 +447,12 @@ xchk_fscounters(
xchk_set_corrupt(sc);
/*
+ * XXX: We can't quiesce percpu counter updates, so exit early.
+ * This can be re-enabled when we gain exclusive freeze functionality.
+ */
+ return 0;
+
+ /*
* If ifree exceeds icount by more than the minimum variance then
* something's probably wrong with the counters.
*/
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index 02819bedc5b1..3d98f604765e 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -186,8 +186,6 @@ xchk_teardown(
}
if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
mnt_drop_write_file(sc->file);
- if (sc->flags & XCHK_REAPING_DISABLED)
- xchk_start_reaping(sc);
if (sc->buf) {
if (sc->buf_cleanup)
sc->buf_cleanup(sc->buf);
diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h
index e71903474cd7..b38e93830dde 100644
--- a/fs/xfs/scrub/scrub.h
+++ b/fs/xfs/scrub/scrub.h
@@ -106,7 +106,6 @@ struct xfs_scrub {
/* XCHK state flags grow up from zero, XREP state flags grown down from 2^31 */
#define XCHK_TRY_HARDER (1 << 0) /* can't get resources, try again */
-#define XCHK_REAPING_DISABLED (1 << 1) /* background block reaping paused */
#define XCHK_FSGATES_DRAIN (1 << 2) /* defer ops draining enabled */
#define XCHK_NEED_DRAIN (1 << 3) /* scrub needs to drain defer ops */
#define XREP_ALREADY_FIXED (1 << 31) /* checking our repair work */
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index 68efd6fda61c..b3894daeb86a 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -98,7 +98,6 @@ TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_FSCOUNTERS);
#define XFS_SCRUB_STATE_STRINGS \
{ XCHK_TRY_HARDER, "try_harder" }, \
- { XCHK_REAPING_DISABLED, "reaping_disabled" }, \
{ XCHK_FSGATES_DRAIN, "fsgates_drain" }, \
{ XCHK_NEED_DRAIN, "need_drain" }, \
{ XREP_ALREADY_FIXED, "already_fixed" }
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index f032d3a4b727..fbb675563208 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -558,7 +558,9 @@ xfs_getbmap(
if (!xfs_iext_next_extent(ifp, &icur, &got)) {
xfs_fileoff_t end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
- out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;
+ if (bmv->bmv_entries > 0)
+ out[bmv->bmv_entries - 1].bmv_oflags |=
+ BMV_OF_LAST;
if (whichfork != XFS_ATTR_FORK && bno < end &&
!xfs_getbmap_full(bmv)) {
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 351849fc18ff..0f60e301eb1f 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -435,18 +435,23 @@ xfs_iget_check_free_state(
}
/* Make all pending inactivation work start immediately. */
-static void
+static bool
xfs_inodegc_queue_all(
struct xfs_mount *mp)
{
struct xfs_inodegc *gc;
int cpu;
+ bool ret = false;
for_each_online_cpu(cpu) {
gc = per_cpu_ptr(mp->m_inodegc, cpu);
- if (!llist_empty(&gc->list))
+ if (!llist_empty(&gc->list)) {
mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
+ ret = true;
+ }
}
+
+ return ret;
}
/*
@@ -1856,6 +1861,8 @@ xfs_inodegc_worker(
struct xfs_inode *ip, *n;
unsigned int nofs_flag;
+ ASSERT(gc->cpu == smp_processor_id());
+
WRITE_ONCE(gc->items, 0);
if (!node)
@@ -1909,24 +1916,41 @@ xfs_inodegc_flush(
/*
* Flush all the pending work and then disable the inode inactivation background
- * workers and wait for them to stop.
+ * workers and wait for them to stop. Caller must hold sb->s_umount to
+ * coordinate changes in the inodegc_enabled state.
*/
void
xfs_inodegc_stop(
struct xfs_mount *mp)
{
+ bool rerun;
+
if (!xfs_clear_inodegc_enabled(mp))
return;
+ /*
+ * Drain all pending inodegc work, including inodes that could be
+ * queued by racing xfs_inodegc_queue or xfs_inodegc_shrinker_scan
+ * threads that sample the inodegc state just prior to us clearing it.
+ * The inodegc flag state prevents new threads from queuing more
+ * inodes, so we queue pending work items and flush the workqueue until
+ * all inodegc lists are empty. IOWs, we cannot use drain_workqueue
+ * here because it does not allow other unserialized mechanisms to
+ * reschedule inodegc work while this draining is in progress.
+ */
xfs_inodegc_queue_all(mp);
- drain_workqueue(mp->m_inodegc_wq);
+ do {
+ flush_workqueue(mp->m_inodegc_wq);
+ rerun = xfs_inodegc_queue_all(mp);
+ } while (rerun);
trace_xfs_inodegc_stop(mp, __return_address);
}
/*
* Enable the inode inactivation background workers and schedule deferred inode
- * inactivation work if there is any.
+ * inactivation work if there is any. Caller must hold sb->s_umount to
+ * coordinate changes in the inodegc_enabled state.
*/
void
xfs_inodegc_start(
@@ -2069,7 +2093,8 @@ xfs_inodegc_queue(
queue_delay = 0;
trace_xfs_inodegc_queue(mp, __return_address);
- mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay);
+ mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
+ queue_delay);
put_cpu_ptr(gc);
if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
@@ -2113,7 +2138,8 @@ xfs_inodegc_cpu_dead(
if (xfs_is_inodegc_enabled(mp)) {
trace_xfs_inodegc_queue(mp, __return_address);
- mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
+ mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
+ 0);
}
put_cpu_ptr(gc);
}
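
Editor's note: the flush/requeue loop replacing drain_workqueue() above is a
generic pattern -- flush, re-check whether racing producers queued more work,
and repeat until a full pass queues nothing. A minimal sketch of that pattern,
with my_queue_all() as a hypothetical stand-in for xfs_inodegc_queue_all():

	#include <linux/workqueue.h>

	static bool my_queue_all(void);		/* hypothetical producer hook */

	static void drain_until_idle(struct workqueue_struct *wq)
	{
		bool rerun;

		do {
			/* wait for everything currently queued */
			flush_workqueue(wq);
			/* requeue work that raced in during the flush */
			rerun = my_queue_all();
		} while (rerun);
	}
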
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 285885c308bd..18c8f168b153 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1006,8 +1006,9 @@ xfs_buffered_write_iomap_begin(
if (eof)
imap.br_startoff = end_fsb; /* fake hole until the end */
- /* We never need to allocate blocks for zeroing a hole. */
- if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
+ /* We never need to allocate blocks for zeroing or unsharing a hole. */
+ if ((flags & (IOMAP_UNSHARE | IOMAP_ZERO)) &&
+ imap.br_startoff > offset_fsb) {
xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
goto out_unlock;
}
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index f3269c0626f0..aaaf5ec13492 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -66,6 +66,9 @@ struct xfs_inodegc {
/* approximate count of inodes in the list */
unsigned int items;
unsigned int shrinker_hits;
+#if defined(DEBUG) || defined(XFS_WARN)
+ unsigned int cpu;
+#endif
};
/*
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 4d2e87462ac4..7e706255f165 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1095,6 +1095,9 @@ xfs_inodegc_init_percpu(
for_each_possible_cpu(cpu) {
gc = per_cpu_ptr(mp->m_inodegc, cpu);
+#if defined(DEBUG) || defined(XFS_WARN)
+ gc->cpu = cpu;
+#endif
init_llist_head(&gc->list);
gc->items = 0;
INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index d7f6335d3999..e86c830728de 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -20,9 +20,9 @@ static inline unsigned long huge_pte_dirty(pte_t pte)
return pte_dirty(pte);
}
-static inline pte_t huge_pte_mkwrite(pte_t pte)
+static inline pte_t huge_pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
- return pte_mkwrite(pte);
+ return pte_mkwrite(pte, vma);
}
#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 358db4a9f167..fed20f2cecc7 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -286,9 +286,8 @@
#define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */
# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0)
-# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
-# define DP_DSC_MAX_BPP_DELTA_VERSION_MASK 0x06
-# define DP_DSC_MAX_BPP_DELTA_AVAILABILITY 0x08
+# define DP_DSC_MAX_BPP_DELTA_VERSION_MASK (0x3 << 5) /* eDP 1.5 & DP 2.0 */
+# define DP_DSC_MAX_BPP_DELTA_AVAILABILITY (1 << 7) /* eDP 1.5 & DP 2.0 */
#define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069
# define DP_DSC_RGB (1 << 0)
@@ -983,6 +982,7 @@
#define DP_EDP_GENERAL_CAP_2 0x703
# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0)
+# define DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE (1 << 4)
#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
# define DP_EDP_X_REGION_CAP_MASK (0xf << 0)
@@ -1008,6 +1008,7 @@
# define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE (1 << 4)
# define DP_EDP_REGIONAL_BACKLIGHT_ENABLE (1 << 5)
# define DP_EDP_UPDATE_REGION_BRIGHTNESS (1 << 6) /* eDP 1.4 */
+# define DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE (1 << 7)
#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722
#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723
@@ -1032,6 +1033,7 @@
#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732
#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733
+#define DP_EDP_PANEL_TARGET_LUMINANCE_VALUE 0x734
#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */
#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index 533d3ee7fe05..86f24a759268 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -181,9 +181,8 @@ static inline u16
drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
- (dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
- DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK <<
- DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT);
+ ((dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
+ DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK) << 8);
}
static inline u32
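
Editor's note: the helper change above fixes a C operator-precedence bug --
`<<` binds tighter than `&`, so the old expression shifted the mask before
applying it. Illustrative numbers (hypothetical DPCD byte 0x47, mask 0x3):

	/* Old: mask shifted before masking -- always clears the byte */
	/*   0x47 & (0x3 << 8) == 0x47 & 0x300 == 0x000              */
	/* New: mask first, then shift into bits 9:8                 */
	/*   (0x47 & 0x3) << 8 == 0x3 << 8   == 0x300                */
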
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 977ca195a536..a4eff85b1f44 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -222,7 +222,7 @@ void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
struct ttm_tt *tt);
-
+unsigned long ttm_tt_pages_limit(void);
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index c87aeecaa9b2..583fe3b49a49 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -96,6 +96,7 @@
/* FFA Bus/Device/Driver related */
struct ffa_device {
+ u32 id;
int vm_id;
bool mode_32bit;
uuid_t uuid;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e53ceee1df37..456f33b9d205 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1197,7 +1197,7 @@ enum bpf_dynptr_type {
};
int bpf_dynptr_check_size(u32 size);
-u32 bpf_dynptr_get_size(const struct bpf_dynptr_kern *ptr);
+u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 3dd29a53b711..5b11a3b0fec0 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -18,8 +18,11 @@
* that converting umax_value to int cannot overflow.
*/
#define BPF_MAX_VAR_SIZ (1 << 29)
-/* size of type_str_buf in bpf_verifier. */
-#define TYPE_STR_BUF_LEN 128
+/* size of tmp_str_buf in bpf_verifier.
+ * we need at least 306 bytes to fit full stack mask representation
+ * (in the "-8,-16,...,-512" form)
+ */
+#define TMP_STR_BUF_LEN 320
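
Editor's note: worked count behind the 306-byte figure above. The full stack
mask string covers 64 slots at offsets -8 through -512: one 1-digit offset
("-8,") takes 3 bytes; the eleven 2-digit offsets (-16 through -96) take
4 bytes each (44); the fifty-two 3-digit offsets (-104 through -512) take
5 bytes each (260). 3 + 44 + 260 = 307, and dropping the comma on the last
entry gives 306; rounding up to 320 leaves headroom plus the NUL terminator.
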
/* Liveness marks, used for registers and spilled-regs (in stack slots).
* Read marks propagate upwards until they find a write mark; they record that
@@ -238,6 +241,10 @@ enum bpf_stack_slot_type {
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
+#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
+ (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
+ (1 << BPF_REG_5))
+
#define BPF_DYNPTR_SIZE sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS (BPF_DYNPTR_SIZE / BPF_REG_SIZE)
@@ -541,6 +548,15 @@ struct bpf_subprog_info {
bool is_async_cb;
};
+struct bpf_verifier_env;
+
+struct backtrack_state {
+ struct bpf_verifier_env *env;
+ u32 frame;
+ u32 reg_masks[MAX_CALL_FRAMES];
+ u64 stack_masks[MAX_CALL_FRAMES];
+};
+
/* single container for all structs
* one verifier_env per bpf_check() call
*/
@@ -578,6 +594,7 @@ struct bpf_verifier_env {
int *insn_stack;
int cur_stack;
} cfg;
+ struct backtrack_state bt;
u32 pass_cnt; /* number of times do_check() was called */
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
@@ -606,8 +623,10 @@ struct bpf_verifier_env {
/* Same as scratched_regs but for stack slots */
u64 scratched_stack_slots;
u64 prev_log_pos, prev_insn_print_pos;
- /* buffer used in reg_type_str() to generate reg_type string */
- char type_str_buf[TYPE_STR_BUF_LEN];
+ /* buffer used to generate temporary string representations,
+ * e.g., in reg_type_str() to generate reg_type string
+ */
+ char tmp_str_buf[TMP_STR_BUF_LEN];
};
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
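
Editor's note: the new backtrack_state tracks, per call frame, which
registers and stack slots still need precision marks. A hedged sketch of how
such per-frame bitmasks are typically manipulated -- the helper names here
are illustrative, not necessarily the verifier's own:

	static inline void bt_set_reg(struct backtrack_state *bt, u32 reg)
	{
		bt->reg_masks[bt->frame] |= 1U << reg;	/* mark register */
	}

	static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg)
	{
		return bt->reg_masks[bt->frame] & (1U << reg);
	}

	static inline void bt_set_slot(struct backtrack_state *bt, u32 slot)
	{
		bt->stack_masks[bt->frame] |= 1ULL << slot; /* 64 slots max */
	}
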
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 1520793c72da..1bd73cefd311 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -217,6 +217,10 @@ int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
+int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
+ bool datasync);
+int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
+ bool datasync);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 885f5395fcd0..567c547cf371 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -692,7 +692,6 @@ static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
*/
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
-void cgroup_rstat_flush_atomic(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 980b76a1237e..d629094fac6e 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -71,8 +71,10 @@ extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
-extern void cpuset_read_lock(void);
-extern void cpuset_read_unlock(void);
+extern void inc_dl_tasks_cs(struct task_struct *task);
+extern void dec_dl_tasks_cs(struct task_struct *task);
+extern void cpuset_lock(void);
+extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -189,8 +191,10 @@ static inline void cpuset_update_active_cpus(void)
static inline void cpuset_wait_for_hotplug(void) { }
-static inline void cpuset_read_lock(void) { }
-static inline void cpuset_read_unlock(void) { }
+static inline void inc_dl_tasks_cs(struct task_struct *task) { }
+static inline void dec_dl_tasks_cs(struct task_struct *task) { }
+static inline void cpuset_lock(void) { }
+static inline void cpuset_unlock(void) { }
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index 0261e2fb3636..95b07f8b77fe 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -3,6 +3,7 @@
#define _LINUX_I8042_H
+#include <linux/errno.h>
#include <linux/types.h>
/*
diff --git a/include/linux/iio/iio-gts-helper.h b/include/linux/iio/iio-gts-helper.h
index dd64e544a3da..9cb6c80dea71 100644
--- a/include/linux/iio/iio-gts-helper.h
+++ b/include/linux/iio/iio-gts-helper.h
@@ -135,7 +135,7 @@ static inline int iio_gts_find_int_time_by_sel(struct iio_gts *gts, int sel)
/**
* iio_gts_find_sel_by_int_time - find selector matching integration time
* @gts: Gain time scale descriptor
- * @gain: HW-gain for which matching selector is searched for
+ * @time: Integration time for which matching selector is searched for
*
* Return: a selector matching given integration time or -EINVAL if
* selector was not found.
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 3399d979ee1c..7fe31b2cd02f 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -36,6 +36,11 @@ struct io_uring_cmd {
u8 pdu[32]; /* available inline for free use */
};
+static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
+{
+ return sqe->cmd;
+}
+
#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
struct iov_iter *iter, void *ioucmd);
@@ -66,11 +71,6 @@ static inline void io_uring_free(struct task_struct *tsk)
if (tsk->io_uring)
__io_uring_free(tsk);
}
-
-static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
-{
- return sqe->cmd;
-}
#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
struct iov_iter *iter, void *ioucmd)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b32256e9e944..74bd269a80a2 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -344,6 +344,16 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))
+/*
+ * Must use lock_map_acquire_try() with override maps to avoid
+ * lockdep thinking they participate in the block chain.
+ */
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \
+ struct lockdep_map _name = { \
+ .name = #_name "-wait-type-override", \
+ .wait_type_inner = _wait_type, \
+ .lock_type = LD_LOCK_WAIT_OVERRIDE, }
+
#else /* !CONFIG_LOCKDEP */
static inline void lockdep_init_task(struct task_struct *task)
@@ -432,6 +442,9 @@ extern int lockdep_is_held(const void *);
#define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0)
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \
+ struct lockdep_map __maybe_unused _name = {}
+
#endif /* !LOCKDEP */
enum xhlock_context_t {
@@ -556,6 +569,7 @@ do { \
#define rwsem_release(l, i) lock_release(l, i)
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_try(l) lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l) lock_release(l, _THIS_IP_)
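
Editor's note: per the comment above, an override map is acquired with the
try variant so lockdep records the wait type without linking the map into its
dependency chains. A hedged usage sketch (the waiting body is elided):

	static void my_annotated_wait(void)	/* hypothetical caller */
	{
		DEFINE_WAIT_OVERRIDE_MAP(my_map, LD_WAIT_SLEEP);

		lock_map_acquire_try(&my_map);	/* annotate, don't chain */
		/* ... perform the actual wait here ... */
		lock_map_release(&my_map);
	}
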
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index d22430840b53..59f4fb1626ea 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -33,6 +33,7 @@ enum lockdep_wait_type {
enum lockdep_lock_type {
LD_LOCK_NORMAL = 0, /* normal, catch all */
LD_LOCK_PERCPU, /* percpu */
+ LD_LOCK_WAIT_OVERRIDE, /* annotation */
LD_LOCK_MAX,
};
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 1fadb5f5978b..542b09118a09 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -455,6 +455,7 @@ void *mas_erase(struct ma_state *mas);
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
void mas_store_prealloc(struct ma_state *mas, void *entry);
void *mas_find(struct ma_state *mas, unsigned long max);
+void *mas_find_range(struct ma_state *mas, unsigned long max);
void *mas_find_rev(struct ma_state *mas, unsigned long min);
int mas_preallocate(struct ma_state *mas, gfp_t gfp);
bool mas_is_err(struct ma_state *mas);
@@ -466,7 +467,9 @@ void mas_destroy(struct ma_state *mas);
int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);
void *mas_prev(struct ma_state *mas, unsigned long min);
+void *mas_prev_range(struct ma_state *mas, unsigned long max);
void *mas_next(struct ma_state *mas, unsigned long max);
+void *mas_next_range(struct ma_state *mas, unsigned long max);
int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
unsigned long size);
@@ -482,13 +485,13 @@ static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
}
/* Checks if a mas has not found anything */
-static inline bool mas_is_none(struct ma_state *mas)
+static inline bool mas_is_none(const struct ma_state *mas)
{
return mas->node == MAS_NONE;
}
/* Checks if a mas has been paused */
-static inline bool mas_is_paused(struct ma_state *mas)
+static inline bool mas_is_paused(const struct ma_state *mas)
{
return mas->node == MAS_PAUSE;
}
@@ -528,6 +531,19 @@ static inline void mas_reset(struct ma_state *mas)
#define mas_for_each(__mas, __entry, __max) \
while (((__entry) = mas_find((__mas), (__max))) != NULL)
+/**
+ * mas_contiguous() - Iterate over a contiguous range of the maple tree.
+ * @__mas: Maple Tree operation state (maple_state)
+ * @__entry: Entry retrieved from the tree
+ * @__max: maximum index to retrieve from the tree
+ *
+ * When returned, mas->index and mas->last will hold the entire range of the
+ * entry. The loop will terminate on the first NULL encountered.
+ *
+ * Note: may return the zero entry.
+ */
+#define mas_contiguous(__mas, __entry, __max) \
+ while (((__entry) = mas_find_range((__mas), (__max))) != NULL)
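
Editor's note: a hedged usage sketch for the new iterator; my_tree, start,
end, and process() are hypothetical caller state. Each iteration leaves
mas.index/mas.last spanning the returned entry:

	void *entry;
	MA_STATE(mas, &my_tree, start, start);

	rcu_read_lock();
	mas_contiguous(&mas, entry, end) {
		/* entry spans [mas.index, mas.last]; stops at first NULL */
		process(entry);
	}
	rcu_read_unlock();
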
/**
* mas_set_range() - Set up Maple Tree operation state for a different index.
@@ -616,7 +632,7 @@ static inline void mt_clear_in_rcu(struct maple_tree *mt)
return;
if (mt_external_lock(mt)) {
- BUG_ON(!mt_lock_is_held(mt));
+ WARN_ON(!mt_lock_is_held(mt));
mt->ma_flags &= ~MT_FLAGS_USE_RCU;
} else {
mtree_lock(mt);
@@ -635,7 +651,7 @@ static inline void mt_set_in_rcu(struct maple_tree *mt)
return;
if (mt_external_lock(mt)) {
- BUG_ON(!mt_lock_is_held(mt));
+ WARN_ON(!mt_lock_is_held(mt));
mt->ma_flags |= MT_FLAGS_USE_RCU;
} else {
mtree_lock(mt);
@@ -670,10 +686,17 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);
#ifdef CONFIG_DEBUG_MAPLE_TREE
+enum mt_dump_format {
+ mt_dump_dec,
+ mt_dump_hex,
+};
+
extern atomic_t maple_tree_tests_run;
extern atomic_t maple_tree_tests_passed;
-void mt_dump(const struct maple_tree *mt);
+void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
+void mas_dump(const struct ma_state *mas);
+void mas_wr_dump(const struct ma_wr_state *wr_mas);
void mt_validate(struct maple_tree *mt);
void mt_cache_shrink(void);
#define MT_BUG_ON(__tree, __x) do { \
@@ -681,7 +704,40 @@ void mt_cache_shrink(void);
if (__x) { \
pr_info("BUG at %s:%d (%u)\n", \
__func__, __LINE__, __x); \
- mt_dump(__tree); \
+ mt_dump(__tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MAS_BUG_ON(__mas, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_dump(__mas); \
+ mt_dump((__mas)->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MAS_WR_BUG_ON(__wrmas, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_wr_dump(__wrmas); \
+ mas_dump((__wrmas)->mas); \
+ mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
pr_info("Pass: %u Run:%u\n", \
atomic_read(&maple_tree_tests_passed), \
atomic_read(&maple_tree_tests_run)); \
@@ -690,8 +746,67 @@ void mt_cache_shrink(void);
atomic_inc(&maple_tree_tests_passed); \
} \
} while (0)
+
+#define MT_WARN_ON(__tree, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+
+#define MAS_WARN_ON(__mas, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_dump(__mas); \
+ mt_dump((__mas)->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+
+#define MAS_WR_WARN_ON(__wrmas, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_wr_dump(__wrmas); \
+ mas_dump((__wrmas)->mas); \
+ mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
#else
-#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
+#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
+#define MAS_BUG_ON(__mas, __x) BUG_ON(__x)
+#define MAS_WR_BUG_ON(__mas, __x) BUG_ON(__x)
+#define MT_WARN_ON(__tree, __x) WARN_ON(__x)
+#define MAS_WARN_ON(__mas, __x) WARN_ON(__x)
+#define MAS_WR_WARN_ON(__mas, __x) WARN_ON(__x)
#endif /* CONFIG_DEBUG_MAPLE_TREE */
#endif /*_LINUX_MAPLE_TREE_H */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 222d7370134c..00a88cf947e1 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1038,7 +1038,6 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
}
void mem_cgroup_flush_stats(void);
-void mem_cgroup_flush_stats_atomic(void);
void mem_cgroup_flush_stats_ratelimited(void);
void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
@@ -1537,10 +1536,6 @@ static inline void mem_cgroup_flush_stats(void)
{
}
-static inline void mem_cgroup_flush_stats_atomic(void)
-{
-}
-
static inline void mem_cgroup_flush_stats_ratelimited(void)
{
}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 27ce77080c79..e5d7b65075a0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -319,11 +319,13 @@ extern unsigned int kobjsize(const void *objp);
#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_5 37 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
+#define VM_HIGH_ARCH_5 BIT(VM_HIGH_ARCH_BIT_5)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
#ifdef CONFIG_ARCH_HAS_PKEYS
@@ -339,6 +341,41 @@ extern unsigned int kobjsize(const void *objp);
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+/*
+ * This flag should not be set with VM_SHARED because of lack of support
+ * core mm. It will also get a guard page. This helps userspace protect
+ * itself from attacks. The reasoning is as follows:
+ *
+ * The shadow stack pointer (SSP) is moved by CALL, RET, and INCSSPQ. The
+ * INCSSP instruction can increment the shadow stack pointer. It is the
+ * shadow stack analog of an instruction like:
+ *
+ * addq $0x80, %rsp
+ *
+ * However, there is one important difference between an ADD on %rsp
+ * and INCSSP. In addition to modifying SSP, INCSSP also reads from the
+ * memory of the first and last elements that were "popped". It can be
+ * thought of as acting like this:
+ *
+ * READ_ONCE(ssp); // read+discard top element on stack
+ * ssp += nr_to_pop * 8; // move the shadow stack
+ * READ_ONCE(ssp-8); // read+discard last popped stack element
+ *
+ * The maximum distance INCSSP can move the SSP is 2040 bytes, before
+ * it would read the memory. Therefore a single page gap will be enough
+ * to prevent any operation from shifting the SSP to an adjacent stack,
+ * since it would have to land in the gap at least once, causing a
+ * fault.
+ *
+ * Prevent using INCSSP to move the SSP between shadow stacks by
+ * having a PAGE_SIZE guard gap.
+ */
+# define VM_SHADOW_STACK VM_HIGH_ARCH_5
+#else
+# define VM_SHADOW_STACK VM_NONE
+#endif
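
Editor's note: the arithmetic behind the comment above, assuming x86-64
sizes:

	/* INCSSPQ's operand is 8 bits, so at most 255 entries of 8 bytes
	 * are popped per instruction: 255 * 8 = 2040 bytes. That is less
	 * than the PAGE_SIZE (4096-byte) guard gap, so any attempt to
	 * step the SSP across the gap must read inside it at least once
	 * and fault.
	 */
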
+
#if defined(CONFIG_X86)
# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
@@ -370,7 +407,7 @@ extern unsigned int kobjsize(const void *objp);
#endif
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
-# define VM_UFFD_MINOR_BIT 37
+# define VM_UFFD_MINOR_BIT 38
# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */
#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
# define VM_UFFD_MINOR VM_NONE
@@ -866,11 +903,24 @@ static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
return mas_find(&vmi->mas, ULONG_MAX);
}
+static inline
+struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
+{
+ return mas_next_range(&vmi->mas, ULONG_MAX);
+}
+
+
static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
return mas_prev(&vmi->mas, 0);
}
+static inline
+struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
+{
+ return mas_prev_range(&vmi->mas, 0);
+}
+
static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
return vmi->mas.index;
@@ -1284,7 +1334,7 @@ void free_compound_page(struct page *page);
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
- pte = pte_mkwrite(pte);
+ pte = pte_mkwrite(pte, vma);
return pte;
}
@@ -2422,6 +2472,7 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
MM_CP_UFFD_WP_RESOLVE)
+bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
@@ -3133,7 +3184,8 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
- unsigned long pgoff, unsigned long *populate, struct list_head *uf);
+ vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
+ struct list_head *uf);
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
bool downgrade);
@@ -3226,15 +3278,26 @@ struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
return mtree_load(&mm->mm_mt, addr);
}
+static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_GROWSDOWN)
+ return stack_guard_gap;
+
+ /* See reasoning around the VM_SHADOW_STACK definition */
+ if (vma->vm_flags & VM_SHADOW_STACK)
+ return PAGE_SIZE;
+
+ return 0;
+}
+
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
+ unsigned long gap = stack_guard_start_gap(vma);
unsigned long vm_start = vma->vm_start;
- if (vma->vm_flags & VM_GROWSDOWN) {
- vm_start -= stack_guard_gap;
- if (vm_start > vma->vm_start)
- vm_start = 0;
- }
+ vm_start -= gap;
+ if (vm_start > vma->vm_start)
+ vm_start = 0;
return vm_start;
}
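
Editor's note: the rewritten vm_start_gap() relies on unsigned wraparound to
detect underflow -- after `vm_start -= gap`, a result *larger* than the
original start means the subtraction wrapped. Illustrative sketch with
hypothetical numbers:

	static unsigned long clamp_start(unsigned long start, unsigned long gap)
	{
		unsigned long s = start - gap;	/* may wrap around zero */

		if (s > start)			/* wrap detected */
			s = 0;			/* clamp to address-space start */
		return s;
	}
	/* clamp_start(0x1000, 0x100000) wraps, exceeds 0x1000, returns 0. */
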
diff --git a/include/linux/mman.h b/include/linux/mman.h
index cee1e4b566d8..40d94411d492 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -15,6 +15,9 @@
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
+#ifndef MAP_ABOVE4G
+#define MAP_ABOVE4G 0
+#endif
#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB 0
#endif
@@ -50,6 +53,7 @@
| MAP_STACK \
| MAP_HUGETLB \
| MAP_32BIT \
+ | MAP_ABOVE4G \
| MAP_HUGE_2MB \
| MAP_HUGE_1GB)
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index b8728d11c949..7c3e7b0b0e8f 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -8,10 +8,12 @@
struct page;
struct vm_area_struct;
struct mm_struct;
+struct vma_iterator;
void dump_page(struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
+void vma_iter_dump_tree(const struct vma_iterator *vmi);
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
@@ -74,6 +76,17 @@ void dump_mm(const struct mm_struct *mm);
} \
unlikely(__ret_warn_once); \
})
+#define VM_WARN_ON_ONCE_MM(cond, mm) ({ \
+ static bool __section(".data.once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_mm(mm); \
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
#define VM_WARN_ON(cond) (void)WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
@@ -90,6 +103,7 @@ void dump_mm(const struct mm_struct *mm);
#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_MM(cond, mm) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
#endif
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index c5a51481bbb9..023918666dd4 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -291,6 +291,20 @@ static inline bool arch_has_hw_pte_young(void)
}
#endif
+#ifndef arch_check_zapped_pte
+static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
+ pte_t pte)
+{
+}
+#endif
+
+#ifndef arch_check_zapped_pmd
+static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
+ pmd_t pmd)
+{
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address,
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index d2c3f16cf6b1..5b1a291b9394 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -100,7 +100,8 @@ struct pipe_buf_operations {
* hook. Returns 0 for good, or a negative error value in case of
* error. If not present all pages are considered good.
*/
- int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);
+ int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *,
+ bool nonblock);
/*
* When the contents of this pipe buffer have been completely
@@ -207,13 +208,14 @@ static inline void pipe_buf_release(struct pipe_inode_info *pipe,
* pipe_buf_confirm - verify contents of the pipe buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to confirm
+ * @nonblock: whether the operation should be nonblocking
*/
static inline int pipe_buf_confirm(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
+ struct pipe_buffer *buf, bool nonblock)
{
if (!buf->ops->confirm)
return 0;
- return buf->ops->confirm(pipe, buf);
+ return buf->ops->confirm(pipe, buf, nonblock);
}
/**
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
index 3101152ce449..1d6e6c424fc6 100644
--- a/include/linux/platform_data/spi-s3c64xx.h
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -36,6 +36,7 @@ struct s3c64xx_spi_info {
int src_clk_nr;
int num_cs;
bool no_cs;
+ bool polling;
int (*cfg_gpio)(void);
};
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index a1aa68141d0b..7c8d65414a70 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -2,6 +2,8 @@
#ifndef __LINUX_BQ27X00_BATTERY_H__
#define __LINUX_BQ27X00_BATTERY_H__
+#include <linux/power_supply.h>
+
enum bq27xxx_chip {
BQ27000 = 1, /* bq27000, bq27200 */
BQ27010, /* bq27010, bq27210 */
@@ -68,7 +70,9 @@ struct bq27xxx_device_info {
struct bq27xxx_access_methods bus;
struct bq27xxx_reg_cache cache;
int charge_design_full;
+ bool removed;
unsigned long last_update;
+ union power_supply_propval last_status;
struct delayed_work work;
struct power_supply *bat;
struct list_head list;
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 0260f5ea98fe..80ff8e533cbd 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -158,6 +158,8 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task);
#endif /* CONFIG_PROC_PID_ARCH_STATUS */
+void arch_proc_pid_thread_features(struct seq_file *m, struct task_struct *task);
+
#else /* CONFIG_PROC_FS */
static inline void proc_root_init(void)
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index c2b9cc5db824..f820bd44d16f 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -1658,8 +1658,7 @@ struct regmap_irq_chip {
int (*handle_pre_irq)(void *irq_drv_data);
int (*handle_post_irq)(void *irq_drv_data);
- int (*handle_mask_sync)(struct regmap *map, int index,
- unsigned int mask_buf_def,
+ int (*handle_mask_sync)(int index, unsigned int mask_buf_def,
unsigned int mask_buf, void *irq_drv_data);
int (*set_type_virt)(unsigned int **buf, unsigned int type,
unsigned long hwirq, int reg);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index eed5d65b8d1f..2553918f0b61 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1852,7 +1852,9 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
+extern int task_can_attach(struct task_struct *p);
+extern int dl_bw_alloc(int cpu, u64 dl_bw);
+extern void dl_bw_free(int cpu, u64 dl_bw);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 7bde8e1c228a..224293b2dd06 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -107,7 +107,10 @@ extern void synchronize_shrinkers(void);
#ifdef CONFIG_SHRINKER_DEBUG
extern int shrinker_debugfs_add(struct shrinker *shrinker);
-extern struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker);
+extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
+ int *debugfs_id);
+extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
+ int debugfs_id);
extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
const char *fmt, ...);
#else /* CONFIG_SHRINKER_DEBUG */
@@ -115,10 +118,16 @@ static inline int shrinker_debugfs_add(struct shrinker *shrinker)
{
return 0;
}
-static inline struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
+static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
+ int *debugfs_id)
{
+ *debugfs_id = -1;
return NULL;
}
+static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
+ int debugfs_id)
+{
+}
static inline __printf(2, 3)
int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
{
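
Editor's note: the split above lets a caller detach the debugfs entry while
holding its own locks and defer the actual dentry removal until the shrinker
is unreachable. A hedged sketch of the intended caller pattern (the locking
shown only schematically; my_unregister_shrinker is hypothetical):

	static void my_unregister_shrinker(struct shrinker *s)
	{
		struct dentry *debugfs_entry;
		int debugfs_id;

		/* under whatever lock protects the shrinker list */
		debugfs_entry = shrinker_debugfs_detach(s, &debugfs_id);
		/* ... unlink s from the list, drop the lock ... */

		/* safe to remove the dentry once s can no longer run */
		shrinker_debugfs_remove(debugfs_entry, debugfs_id);
	}
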
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 738776ab8838..8ddb4af1a501 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4033,7 +4033,7 @@ __skb_header_pointer(const struct sk_buff *skb, int offset, int len,
if (likely(hlen - offset >= len))
return (void *)data + offset;
- if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0))
+ if (!skb || !buffer || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0))
return NULL;
return buffer;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 3c69cb653cb9..b2128df5edea 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -368,6 +368,7 @@ static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
}
/* linux/mm/workingset.c */
+bool workingset_test_recent(void *shadow, bool file, bool *workingset);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 33a0ee3bcb2e..f97cc87533bd 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -72,6 +72,8 @@ struct open_how;
struct mount_attr;
struct landlock_ruleset_attr;
enum landlock_rule_type;
+struct cachestat_range;
+struct cachestat;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -1058,6 +1060,10 @@ asmlinkage long sys_memfd_secret(unsigned int flags);
asmlinkage long sys_set_mempolicy_home_node(unsigned long start, unsigned long len,
unsigned long home_node,
unsigned long flags);
+asmlinkage long sys_cachestat(unsigned int fd,
+ struct cachestat_range __user *cstat_range,
+ struct cachestat __user *cstat, unsigned int flags);
+asmlinkage long sys_map_shadow_stack(unsigned long addr, unsigned long size, unsigned int flags);
/*
* Architecture-specific system calls
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 77693389c3f9..6a1e8f157255 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -282,6 +282,7 @@ enum tpm_chip_flags {
TPM_CHIP_FLAG_ALWAYS_POWERED = BIT(5),
TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED = BIT(6),
TPM_CHIP_FLAG_FIRMWARE_UPGRADE = BIT(7),
+ TPM_CHIP_FLAG_SUSPENDED = BIT(8),
};
#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 3992c994787f..91d1d6e671c7 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -614,6 +614,8 @@ extern void __warn_flushing_systemwide_wq(void)
({ \
if (0) \
__warn_flushing_systemwide_wq(); \
+ pr_info("Please don't flush events workqueue at %s:%d\n", __FILE__, __LINE__); \
+ show_one_workqueue(system_wq); \
__flush_workqueue(system_wq); \
})
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index a6c8aee2f256..8baf34639939 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1327,7 +1327,7 @@ int hci_le_create_cis(struct hci_conn *conn);
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
u8 role);
-int hci_conn_del(struct hci_conn *conn);
+void hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 0c2778a836db..e00057984489 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -178,12 +178,15 @@ wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b)
* setting.
* @WPAN_PHY_FLAG_STATE_QUEUE_STOPPED: Indicates that the transmit queue was
* temporarily stopped.
+ * @WPAN_PHY_FLAG_DATAGRAMS_ONLY: Indicates that the transceiver is only able to
+ * send/receive datagrams.
*/
enum wpan_phy_flags {
WPAN_PHY_FLAG_TXPOWER = BIT(1),
WPAN_PHY_FLAG_CCA_ED_LEVEL = BIT(2),
WPAN_PHY_FLAG_CCA_MODE = BIT(3),
WPAN_PHY_FLAG_STATE_QUEUE_STOPPED = BIT(4),
+ WPAN_PHY_FLAG_DATAGRAMS_ONLY = BIT(5),
};
struct wpan_phy {
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index da8a3e648c7a..063313df447d 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -74,6 +74,10 @@ struct ieee802154_beacon_hdr {
#endif
} __packed;
+struct ieee802154_mac_cmd_pl {
+ u8 cmd_id;
+} __packed;
+
struct ieee802154_sechdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 level:3,
@@ -149,6 +153,16 @@ struct ieee802154_beacon_frame {
struct ieee802154_beacon_hdr mac_pl;
};
+struct ieee802154_mac_cmd_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_mac_cmd_pl mac_pl;
+};
+
+struct ieee802154_beacon_req_frame {
+ struct ieee802154_hdr mhr;
+ struct ieee802154_mac_cmd_pl mac_pl;
+};
+
/* pushes hdr onto the skb. fields of hdr->fc that can be calculated from
* the contents of hdr will be, and the actual value of those bits in
* hdr->fc will be ignored. this includes the INTRA_PAN bit and the frame
@@ -174,9 +188,13 @@ int ieee802154_hdr_peek_addrs(const struct sk_buff *skb,
*/
int ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr);
-/* pushes a beacon frame into an skb */
+/* pushes/pulls various frame types into/from an skb */
int ieee802154_beacon_push(struct sk_buff *skb,
struct ieee802154_beacon_frame *beacon);
+int ieee802154_mac_cmd_push(struct sk_buff *skb, void *frame,
+ const void *pl, unsigned int pl_len);
+int ieee802154_mac_cmd_pl_pull(struct sk_buff *skb,
+ struct ieee802154_mac_cmd_pl *mac_pl);
int ieee802154_max_payload(const struct ieee802154_hdr *hdr);
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index d318c769b445..a8d7b8a3688a 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -180,7 +180,7 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
if (likely(!cross_pg))
return false;
- return pool->dma_pages_cnt &&
+ return pool->dma_pages &&
!(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
}
diff --git a/include/sound/core.h b/include/sound/core.h
index 3edc4ab08774..4ea5f66b59d7 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -98,7 +98,7 @@ struct snd_card {
struct device ctl_dev; /* control device */
unsigned int last_numid; /* last used numeric ID */
- struct rw_semaphore controls_rwsem; /* controls list lock */
+ struct rw_semaphore controls_rwsem; /* controls lock (list and values) */
rwlock_t ctl_files_rwlock; /* ctl_files list lock */
int controls_count; /* count of all controls */
size_t user_ctl_alloc_size; // current memory allocation by user controls.
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index e9d412d19dbb..597677acc6b1 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -18,97 +18,6 @@
#ifndef __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
-enum afs_call_trace {
- afs_call_trace_alloc,
- afs_call_trace_free,
- afs_call_trace_get,
- afs_call_trace_put,
- afs_call_trace_wake,
- afs_call_trace_work,
-};
-
-enum afs_server_trace {
- afs_server_trace_alloc,
- afs_server_trace_callback,
- afs_server_trace_destroy,
- afs_server_trace_free,
- afs_server_trace_gc,
- afs_server_trace_get_by_addr,
- afs_server_trace_get_by_uuid,
- afs_server_trace_get_caps,
- afs_server_trace_get_install,
- afs_server_trace_get_new_cbi,
- afs_server_trace_get_probe,
- afs_server_trace_give_up_cb,
- afs_server_trace_purging,
- afs_server_trace_put_call,
- afs_server_trace_put_cbi,
- afs_server_trace_put_find_rsq,
- afs_server_trace_put_probe,
- afs_server_trace_put_slist,
- afs_server_trace_put_slist_isort,
- afs_server_trace_put_uuid_rsq,
- afs_server_trace_update,
-};
-
-
-enum afs_volume_trace {
- afs_volume_trace_alloc,
- afs_volume_trace_free,
- afs_volume_trace_get_alloc_sbi,
- afs_volume_trace_get_cell_insert,
- afs_volume_trace_get_new_op,
- afs_volume_trace_get_query_alias,
- afs_volume_trace_put_cell_dup,
- afs_volume_trace_put_cell_root,
- afs_volume_trace_put_destroy_sbi,
- afs_volume_trace_put_free_fc,
- afs_volume_trace_put_put_op,
- afs_volume_trace_put_query_alias,
- afs_volume_trace_put_validate_fc,
- afs_volume_trace_remove,
-};
-
-enum afs_cell_trace {
- afs_cell_trace_alloc,
- afs_cell_trace_free,
- afs_cell_trace_get_queue_dns,
- afs_cell_trace_get_queue_manage,
- afs_cell_trace_get_queue_new,
- afs_cell_trace_get_vol,
- afs_cell_trace_insert,
- afs_cell_trace_manage,
- afs_cell_trace_put_candidate,
- afs_cell_trace_put_destroy,
- afs_cell_trace_put_queue_fail,
- afs_cell_trace_put_queue_work,
- afs_cell_trace_put_vol,
- afs_cell_trace_see_source,
- afs_cell_trace_see_ws,
- afs_cell_trace_unuse_alias,
- afs_cell_trace_unuse_check_alias,
- afs_cell_trace_unuse_delete,
- afs_cell_trace_unuse_fc,
- afs_cell_trace_unuse_lookup,
- afs_cell_trace_unuse_mntpt,
- afs_cell_trace_unuse_no_pin,
- afs_cell_trace_unuse_parse,
- afs_cell_trace_unuse_pin,
- afs_cell_trace_unuse_probe,
- afs_cell_trace_unuse_sbi,
- afs_cell_trace_unuse_ws,
- afs_cell_trace_use_alias,
- afs_cell_trace_use_check_alias,
- afs_cell_trace_use_fc,
- afs_cell_trace_use_fc_alias,
- afs_cell_trace_use_lookup,
- afs_cell_trace_use_mntpt,
- afs_cell_trace_use_pin,
- afs_cell_trace_use_probe,
- afs_cell_trace_use_sbi,
- afs_cell_trace_wait,
-};
-
enum afs_fs_operation {
afs_FS_FetchData = 130, /* AFS Fetch file data */
afs_FS_FetchACL = 131, /* AFS Fetch file ACL */
@@ -202,121 +111,6 @@ enum yfs_cm_operation {
yfs_CB_CallBack = 64204,
};
-enum afs_edit_dir_op {
- afs_edit_dir_create,
- afs_edit_dir_create_error,
- afs_edit_dir_create_inval,
- afs_edit_dir_create_nospc,
- afs_edit_dir_delete,
- afs_edit_dir_delete_error,
- afs_edit_dir_delete_inval,
- afs_edit_dir_delete_noent,
-};
-
-enum afs_edit_dir_reason {
- afs_edit_dir_for_create,
- afs_edit_dir_for_link,
- afs_edit_dir_for_mkdir,
- afs_edit_dir_for_rename_0,
- afs_edit_dir_for_rename_1,
- afs_edit_dir_for_rename_2,
- afs_edit_dir_for_rmdir,
- afs_edit_dir_for_silly_0,
- afs_edit_dir_for_silly_1,
- afs_edit_dir_for_symlink,
- afs_edit_dir_for_unlink,
-};
-
-enum afs_eproto_cause {
- afs_eproto_bad_status,
- afs_eproto_cb_count,
- afs_eproto_cb_fid_count,
- afs_eproto_cellname_len,
- afs_eproto_file_type,
- afs_eproto_ibulkst_cb_count,
- afs_eproto_ibulkst_count,
- afs_eproto_motd_len,
- afs_eproto_offline_msg_len,
- afs_eproto_volname_len,
- afs_eproto_yvl_fsendpt4_len,
- afs_eproto_yvl_fsendpt6_len,
- afs_eproto_yvl_fsendpt_num,
- afs_eproto_yvl_fsendpt_type,
- afs_eproto_yvl_vlendpt4_len,
- afs_eproto_yvl_vlendpt6_len,
- afs_eproto_yvl_vlendpt_type,
-};
-
-enum afs_io_error {
- afs_io_error_cm_reply,
- afs_io_error_extract,
- afs_io_error_fs_probe_fail,
- afs_io_error_vl_lookup_fail,
- afs_io_error_vl_probe_fail,
-};
-
-enum afs_file_error {
- afs_file_error_dir_bad_magic,
- afs_file_error_dir_big,
- afs_file_error_dir_missing_page,
- afs_file_error_dir_name_too_long,
- afs_file_error_dir_over_end,
- afs_file_error_dir_small,
- afs_file_error_dir_unmarked_ext,
- afs_file_error_mntpt,
- afs_file_error_writeback_fail,
-};
-
-enum afs_flock_event {
- afs_flock_acquired,
- afs_flock_callback_break,
- afs_flock_defer_unlock,
- afs_flock_extend_fail,
- afs_flock_fail_other,
- afs_flock_fail_perm,
- afs_flock_no_lockers,
- afs_flock_release_fail,
- afs_flock_silly_delete,
- afs_flock_timestamp,
- afs_flock_try_to_lock,
- afs_flock_vfs_lock,
- afs_flock_vfs_locking,
- afs_flock_waited,
- afs_flock_waiting,
- afs_flock_work_extending,
- afs_flock_work_retry,
- afs_flock_work_unlocking,
- afs_flock_would_block,
-};
-
-enum afs_flock_operation {
- afs_flock_op_copy_lock,
- afs_flock_op_flock,
- afs_flock_op_grant,
- afs_flock_op_lock,
- afs_flock_op_release_lock,
- afs_flock_op_return_ok,
- afs_flock_op_return_eagain,
- afs_flock_op_return_edeadlk,
- afs_flock_op_return_error,
- afs_flock_op_set_lock,
- afs_flock_op_unlock,
- afs_flock_op_wake,
-};
-
-enum afs_cb_break_reason {
- afs_cb_break_no_break,
- afs_cb_break_no_promise,
- afs_cb_break_for_callback,
- afs_cb_break_for_deleted,
- afs_cb_break_for_lapsed,
- afs_cb_break_for_s_reinit,
- afs_cb_break_for_unlink,
- afs_cb_break_for_v_break,
- afs_cb_break_for_volume_callback,
- afs_cb_break_for_zap,
-};
-
#endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */
/*
@@ -391,6 +185,7 @@ enum afs_cb_break_reason {
EM(afs_cell_trace_unuse_fc, "UNU fc ") \
EM(afs_cell_trace_unuse_lookup, "UNU lookup") \
EM(afs_cell_trace_unuse_mntpt, "UNU mntpt ") \
+ EM(afs_cell_trace_unuse_no_pin, "UNU no-pin") \
EM(afs_cell_trace_unuse_parse, "UNU parse ") \
EM(afs_cell_trace_unuse_pin, "UNU pin ") \
EM(afs_cell_trace_unuse_probe, "UNU probe ") \
@@ -615,6 +410,32 @@ enum afs_cb_break_reason {
E_(afs_cb_break_for_zap, "break-zap")
/*
+ * Generate enums for tracing information.
+ */
+#ifndef __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY
+#define __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum afs_call_trace { afs_call_traces } __mode(byte);
+enum afs_cb_break_reason { afs_cb_break_reasons } __mode(byte);
+enum afs_cell_trace { afs_cell_traces } __mode(byte);
+enum afs_edit_dir_op { afs_edit_dir_ops } __mode(byte);
+enum afs_edit_dir_reason { afs_edit_dir_reasons } __mode(byte);
+enum afs_eproto_cause { afs_eproto_causes } __mode(byte);
+enum afs_file_error { afs_file_errors } __mode(byte);
+enum afs_flock_event { afs_flock_events } __mode(byte);
+enum afs_flock_operation { afs_flock_operations } __mode(byte);
+enum afs_io_error { afs_io_errors } __mode(byte);
+enum afs_server_trace { afs_server_traces } __mode(byte);
+enum afs_volume_trace { afs_volume_traces } __mode(byte);
+
+#endif /* end __AFS_GENERATE_TRACE_ENUMS_ONCE_ONLY */
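
Editor's note: the EM()/E_() pair is a classic X-macro -- one list is
expanded once into enum definitions (above) and again into symbol/string
tables for userspace export (below). A self-contained illustration with
hypothetical names:

	#define MY_TRACE_POINTS \
		EM(my_trace_alloc, "alloc") \
		EM(my_trace_free,  "free")  \
		E_(my_trace_put,   "put")

	/* First expansion: enum constants */
	#undef EM
	#undef E_
	#define EM(a, b) a,
	#define E_(a, b) a
	enum my_trace { MY_TRACE_POINTS };

	/* Second expansion: id -> name table */
	#undef EM
	#undef E_
	#define EM(a, b) { a, b },
	#define E_(a, b) { a, b }
	static const struct { int id; const char *name; } my_trace_names[] = {
		MY_TRACE_POINTS
	};
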
+
+/*
* Export enum symbols via userspace.
*/
#undef EM
@@ -654,12 +475,12 @@ TRACE_EVENT(afs_receive_data,
TP_ARGS(call, iter, want_more, ret),
TP_STRUCT__entry(
- __field(loff_t, remain )
- __field(unsigned int, call )
- __field(enum afs_call_state, state )
- __field(unsigned short, unmarshall )
- __field(bool, want_more )
- __field(int, ret )
+ __field(loff_t, remain)
+ __field(unsigned int, call)
+ __field(enum afs_call_state, state)
+ __field(unsigned short, unmarshall)
+ __field(bool, want_more)
+ __field(int, ret)
),
TP_fast_assign(
@@ -686,9 +507,9 @@ TRACE_EVENT(afs_notify_call,
TP_ARGS(rxcall, call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_call_state, state )
- __field(unsigned short, unmarshall )
+ __field(unsigned int, call)
+ __field(enum afs_call_state, state)
+ __field(unsigned short, unmarshall)
),
TP_fast_assign(
@@ -708,9 +529,9 @@ TRACE_EVENT(afs_cb_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(u32, op )
- __field(u16, service_id )
+ __field(unsigned int, call)
+ __field(u32, op)
+ __field(u16, service_id)
),
TP_fast_assign(
@@ -733,11 +554,11 @@ TRACE_EVENT(afs_call,
TP_ARGS(call_debug_id, op, ref, outstanding, where),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, op )
- __field(int, ref )
- __field(int, outstanding )
- __field(const void *, where )
+ __field(unsigned int, call)
+ __field(int, op)
+ __field(int, ref)
+ __field(int, outstanding)
+ __field(const void *, where)
),
TP_fast_assign(
@@ -762,9 +583,9 @@ TRACE_EVENT(afs_make_fs_call,
TP_ARGS(call, fid),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -794,10 +615,10 @@ TRACE_EVENT(afs_make_fs_calli,
TP_ARGS(call, fid, i),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, i )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
+ __field(unsigned int, call)
+ __field(unsigned int, i)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -829,10 +650,10 @@ TRACE_EVENT(afs_make_fs_call1,
TP_ARGS(call, fid, name),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -866,11 +687,11 @@ TRACE_EVENT(afs_make_fs_call2,
TP_ARGS(call, fid, name, name2),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_fs_operation, op )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
- __array(char, name2, 24 )
+ __field(unsigned int, call)
+ __field(enum afs_fs_operation, op)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
+ __array(char, name2, 24)
),
TP_fast_assign(
@@ -907,8 +728,8 @@ TRACE_EVENT(afs_make_vl_call,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_vl_operation, op )
+ __field(unsigned int, call)
+ __field(enum afs_vl_operation, op)
),
TP_fast_assign(
@@ -927,10 +748,10 @@ TRACE_EVENT(afs_call_done,
TP_ARGS(call),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(struct rxrpc_call *, rx_call )
- __field(int, ret )
- __field(u32, abort_code )
+ __field(unsigned int, call)
+ __field(struct rxrpc_call *, rx_call)
+ __field(int, ret)
+ __field(u32, abort_code)
),
TP_fast_assign(
@@ -953,10 +774,10 @@ TRACE_EVENT(afs_send_data,
TP_ARGS(call, msg),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, flags )
- __field(loff_t, offset )
- __field(loff_t, count )
+ __field(unsigned int, call)
+ __field(unsigned int, flags)
+ __field(loff_t, offset)
+ __field(loff_t, count)
),
TP_fast_assign(
@@ -977,10 +798,10 @@ TRACE_EVENT(afs_sent_data,
TP_ARGS(call, msg, ret),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, ret )
- __field(loff_t, offset )
- __field(loff_t, count )
+ __field(unsigned int, call)
+ __field(int, ret)
+ __field(loff_t, offset)
+ __field(loff_t, count)
),
TP_fast_assign(
@@ -1001,9 +822,9 @@ TRACE_EVENT(afs_dir_check_failed,
TP_ARGS(vnode, off, i_size),
TP_STRUCT__entry(
- __field(struct afs_vnode *, vnode )
- __field(loff_t, off )
- __field(loff_t, i_size )
+ __field(struct afs_vnode *, vnode)
+ __field(loff_t, off)
+ __field(loff_t, i_size)
),
TP_fast_assign(
@@ -1022,11 +843,11 @@ TRACE_EVENT(afs_folio_dirty,
TP_ARGS(vnode, where, folio),
TP_STRUCT__entry(
- __field(struct afs_vnode *, vnode )
- __field(const char *, where )
- __field(pgoff_t, index )
- __field(unsigned long, from )
- __field(unsigned long, to )
+ __field(struct afs_vnode *, vnode)
+ __field(const char *, where)
+ __field(pgoff_t, index)
+ __field(unsigned long, from)
+ __field(unsigned long, to)
),
TP_fast_assign(
@@ -1056,11 +877,11 @@ TRACE_EVENT(afs_call_state,
TP_ARGS(call, from, to, ret, remote_abort),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_call_state, from )
- __field(enum afs_call_state, to )
- __field(int, ret )
- __field(u32, abort )
+ __field(unsigned int, call)
+ __field(enum afs_call_state, from)
+ __field(enum afs_call_state, to)
+ __field(int, ret)
+ __field(u32, abort)
),
TP_fast_assign(
@@ -1084,9 +905,9 @@ TRACE_EVENT(afs_lookup,
TP_ARGS(dvnode, name, fid),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, dfid )
- __field_struct(struct afs_fid, fid )
- __array(char, name, 24 )
+ __field_struct(struct afs_fid, dfid)
+ __field_struct(struct afs_fid, fid)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -1116,15 +937,15 @@ TRACE_EVENT(afs_edit_dir,
TP_ARGS(dvnode, why, op, block, slot, f_vnode, f_unique, name),
TP_STRUCT__entry(
- __field(unsigned int, vnode )
- __field(unsigned int, unique )
- __field(enum afs_edit_dir_reason, why )
- __field(enum afs_edit_dir_op, op )
- __field(unsigned int, block )
- __field(unsigned short, slot )
- __field(unsigned int, f_vnode )
- __field(unsigned int, f_unique )
- __array(char, name, 24 )
+ __field(unsigned int, vnode)
+ __field(unsigned int, unique)
+ __field(enum afs_edit_dir_reason, why)
+ __field(enum afs_edit_dir_op, op)
+ __field(unsigned int, block)
+ __field(unsigned short, slot)
+ __field(unsigned int, f_vnode)
+ __field(unsigned int, f_unique)
+ __array(char, name, 24)
),
TP_fast_assign(
@@ -1157,8 +978,8 @@ TRACE_EVENT(afs_protocol_error,
TP_ARGS(call, cause),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(enum afs_eproto_cause, cause )
+ __field(unsigned int, call)
+ __field(enum afs_eproto_cause, cause)
),
TP_fast_assign(
@@ -1177,9 +998,9 @@ TRACE_EVENT(afs_io_error,
TP_ARGS(call, error, where),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(int, error )
- __field(enum afs_io_error, where )
+ __field(unsigned int, call)
+ __field(int, error)
+ __field(enum afs_io_error, where)
),
TP_fast_assign(
@@ -1199,9 +1020,9 @@ TRACE_EVENT(afs_file_error,
TP_ARGS(vnode, error, where),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(int, error )
- __field(enum afs_file_error, where )
+ __field_struct(struct afs_fid, fid)
+ __field(int, error)
+ __field(enum afs_file_error, where)
),
TP_fast_assign(
@@ -1222,9 +1043,9 @@ TRACE_EVENT(afs_cm_no_server,
TP_ARGS(call, srx),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, op_id )
- __field_struct(struct sockaddr_rxrpc, srx )
+ __field(unsigned int, call)
+ __field(unsigned int, op_id)
+ __field_struct(struct sockaddr_rxrpc, srx)
),
TP_fast_assign(
@@ -1243,9 +1064,9 @@ TRACE_EVENT(afs_cm_no_server_u,
TP_ARGS(call, uuid),
TP_STRUCT__entry(
- __field(unsigned int, call )
- __field(unsigned int, op_id )
- __field_struct(uuid_t, uuid )
+ __field(unsigned int, call)
+ __field(unsigned int, op_id)
+ __field_struct(uuid_t, uuid)
),
TP_fast_assign(
@@ -1265,11 +1086,11 @@ TRACE_EVENT(afs_flock_ev,
TP_ARGS(vnode, fl, event, error),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(enum afs_flock_event, event )
- __field(enum afs_lock_state, state )
- __field(int, error )
- __field(unsigned int, debug_id )
+ __field_struct(struct afs_fid, fid)
+ __field(enum afs_flock_event, event)
+ __field(enum afs_lock_state, state)
+ __field(int, error)
+ __field(unsigned int, debug_id)
),
TP_fast_assign(
@@ -1295,13 +1116,13 @@ TRACE_EVENT(afs_flock_op,
TP_ARGS(vnode, fl, op),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(loff_t, from )
- __field(loff_t, len )
- __field(enum afs_flock_operation, op )
- __field(unsigned char, type )
- __field(unsigned int, flags )
- __field(unsigned int, debug_id )
+ __field_struct(struct afs_fid, fid)
+ __field(loff_t, from)
+ __field(loff_t, len)
+ __field(enum afs_flock_operation, op)
+ __field(unsigned char, type)
+ __field(unsigned int, flags)
+ __field(unsigned int, debug_id)
),
TP_fast_assign(
@@ -1328,7 +1149,7 @@ TRACE_EVENT(afs_reload_dir,
TP_ARGS(vnode),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
+ __field_struct(struct afs_fid, fid)
),
TP_fast_assign(
@@ -1345,8 +1166,8 @@ TRACE_EVENT(afs_silly_rename,
TP_ARGS(vnode, done),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(bool, done )
+ __field_struct(struct afs_fid, fid)
+ __field(bool, done)
),
TP_fast_assign(
@@ -1365,9 +1186,9 @@ TRACE_EVENT(afs_get_tree,
TP_ARGS(cell, volume),
TP_STRUCT__entry(
- __field(u64, vid )
- __array(char, cell, 24 )
- __array(char, volume, 24 )
+ __field(u64, vid)
+ __array(char, cell, 24)
+ __array(char, volume, 24)
),
TP_fast_assign(
@@ -1392,10 +1213,10 @@ TRACE_EVENT(afs_cb_break,
TP_ARGS(fid, cb_break, reason, skipped),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(unsigned int, cb_break )
- __field(enum afs_cb_break_reason, reason )
- __field(bool, skipped )
+ __field_struct(struct afs_fid, fid)
+ __field(unsigned int, cb_break)
+ __field(enum afs_cb_break_reason, reason)
+ __field(bool, skipped)
),
TP_fast_assign(
@@ -1418,8 +1239,8 @@ TRACE_EVENT(afs_cb_miss,
TP_ARGS(fid, reason),
TP_STRUCT__entry(
- __field_struct(struct afs_fid, fid )
- __field(enum afs_cb_break_reason, reason )
+ __field_struct(struct afs_fid, fid)
+ __field(enum afs_cb_break_reason, reason)
),
TP_fast_assign(
@@ -1439,10 +1260,10 @@ TRACE_EVENT(afs_server,
TP_ARGS(server_debug_id, ref, active, reason),
TP_STRUCT__entry(
- __field(unsigned int, server )
- __field(int, ref )
- __field(int, active )
- __field(int, reason )
+ __field(unsigned int, server)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
),
TP_fast_assign(
@@ -1465,9 +1286,9 @@ TRACE_EVENT(afs_volume,
TP_ARGS(vid, ref, reason),
TP_STRUCT__entry(
- __field(afs_volid_t, vid )
- __field(int, ref )
- __field(enum afs_volume_trace, reason )
+ __field(afs_volid_t, vid)
+ __field(int, ref)
+ __field(enum afs_volume_trace, reason)
),
TP_fast_assign(
@@ -1489,10 +1310,10 @@ TRACE_EVENT(afs_cell,
TP_ARGS(cell_debug_id, ref, active, reason),
TP_STRUCT__entry(
- __field(unsigned int, cell )
- __field(int, ref )
- __field(int, active )
- __field(int, reason )
+ __field(unsigned int, cell)
+ __field(int, ref)
+ __field(int, active)
+ __field(int, reason)
),
TP_fast_assign(
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index ffbe4cec9f32..0f52d0ac47c5 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -242,7 +242,8 @@ typedef struct siginfo {
#define SEGV_ADIPERR 7 /* Precise MCD exception */
#define SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
#define SEGV_MTESERR 9 /* Synchronous ARM MTE exception */
-#define NSIGSEGV 9
+#define SEGV_CPERR 10 /* Control protection fault */
+#define NSIGSEGV 10
/*
* SIGBUS si_codes
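A minimal userspace sketch of consuming the new si_code (the handler and faulting scenario are illustrative, not part of the patch; the fallback define mirrors the uapi value added above for builds against older headers):

#include <signal.h>
#include <string.h>
#include <unistd.h>

#ifndef SEGV_CPERR
#define SEGV_CPERR 10	/* matches the value added above */
#endif

static void segv_handler(int sig, siginfo_t *info, void *ucontext)
{
	if (info->si_code == SEGV_CPERR) {
		static const char msg[] = "control-protection fault\n";
		write(STDERR_FILENO, msg, sizeof(msg) - 1);	/* async-signal-safe */
	}
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);
	/* ... run code that may trip the shadow stack ... */
	return 0;
}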
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 45fa180cc56a..cd639fae9086 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -886,8 +886,11 @@ __SYSCALL(__NR_futex_waitv, sys_futex_waitv)
#define __NR_set_mempolicy_home_node 450
__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
+#define __NR_cachestat 451
+__SYSCALL(__NR_cachestat, sys_cachestat)
+
#undef __NR_syscalls
-#define __NR_syscalls 451
+#define __NR_syscalls 452
/*
* 32 bit systems traditionally used different
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index b6eb90df5d05..79b14828d542 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -245,6 +245,8 @@ union drm_amdgpu_bo_list {
/* indicate some errors are detected by RAS */
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE (1<<3)
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE (1<<4)
+/* indicate that the reset hasn't completed yet */
+#define AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS (1<<5)
/* Context priority level */
#define AMDGPU_CTX_PRIORITY_UNSET -2048
@@ -592,6 +594,7 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09
+#define AMDGPU_CHUNK_ID_CP_GFX_SHADOW 0x0a
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
@@ -708,6 +711,15 @@ struct drm_amdgpu_cs_chunk_data {
};
};
+#define AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW 0x1
+
+struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
+ __u64 shadow_va;
+ __u64 csa_va;
+ __u64 gds_va;
+ __u64 flags;
+};
+
/*
 * Query h/w info: Flag that this is integrated (a.k.a. fusion) GPU
*
@@ -876,6 +888,8 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VIDEO_CAPS_DECODE 0
/* Subquery id: Encode */
#define AMDGPU_INFO_VIDEO_CAPS_ENCODE 1
+/* Query the max number of IBs per gang per submission */
+#define AMDGPU_INFO_MAX_IBS 0x22
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@@ -1126,6 +1140,14 @@ struct drm_amdgpu_info_device {
__u64 mall_size; /* AKA infinity cache */
/* high 32 bits of the rb pipes mask */
__u32 enabled_rb_pipes_mask_hi;
+ /* shadow area size for gfx11 */
+ __u32 shadow_size;
+ /* shadow area base virtual alignment for gfx11 */
+ __u32 shadow_alignment;
+ /* context save area size for gfx11 */
+ __u32 csa_size;
+ /* context save area base virtual alignment for gfx11 */
+ __u32 csa_alignment;
};
struct drm_amdgpu_info_hw_ip {
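A hypothetical userspace fragment wiring the new shadow chunk into a CS submission; the GPU VAs are assumptions (buffers the caller mapped, sized per the new shadow_size/shadow_alignment/csa_* info fields), and the include path assumes libdrm's installed headers:

#include <stdint.h>
#include <drm/amdgpu_drm.h>

static void fill_shadow_chunk(struct drm_amdgpu_cs_chunk *chunk,
			      struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow,
			      uint64_t shadow_va, uint64_t csa_va, uint64_t gds_va)
{
	/* VAs are assumed to be mapped by the caller beforehand */
	shadow->shadow_va = shadow_va;
	shadow->csa_va    = csa_va;
	shadow->gds_va    = gds_va;
	shadow->flags     = AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;

	chunk->chunk_id   = AMDGPU_CHUNK_ID_CP_GFX_SHADOW;
	chunk->length_dw  = sizeof(*shadow) / 4;
	chunk->chunk_data = (uint64_t)(uintptr_t)shadow;
}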
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index ac3da855fb19..fa1ceeae2596 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -406,6 +406,8 @@ typedef struct elf64_shdr {
#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */
#define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */
+/* Old binutils treats 0x203 as a CET state */
+#define NT_X86_SHSTK 0x204 /* x86 SHSTK state */
#define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */
#define NT_S390_TIMER 0x301 /* s390 timer register */
#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */
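Regset note types double as the addr argument of PTRACE_GETREGSET, so a debugger could fetch the new shadow-stack state roughly as below; the payload layout is arch-defined and treated as opaque here, and the fallback define covers libc headers that predate this patch:

#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef NT_X86_SHSTK
#define NT_X86_SHSTK 0x204	/* matches the value added above */
#endif

static long read_shstk_regset(pid_t pid, uint64_t *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };

	/* pid must already be an attached tracee */
	return ptrace(PTRACE_GETREGSET, pid, NT_X86_SHSTK, &iov);
}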
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 1b9d0dfae72d..b3fcab13fcd3 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -206,6 +206,7 @@
* - add extension header
* - add FUSE_EXT_GROUPS
* - add FUSE_CREATE_SUPP_GROUP
+ * - add FUSE_HAS_EXPIRE_ONLY
*/
#ifndef _LINUX_FUSE_H
@@ -369,6 +370,7 @@ struct fuse_file_lock {
* FUSE_HAS_INODE_DAX: use per inode DAX
* FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir,
* symlink and mknod (single group that matches parent)
+ * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -406,6 +408,7 @@ struct fuse_file_lock {
#define FUSE_SECURITY_CTX (1ULL << 32)
#define FUSE_HAS_INODE_DAX (1ULL << 33)
#define FUSE_CREATE_SUPP_GROUP (1ULL << 34)
+#define FUSE_HAS_EXPIRE_ONLY (1ULL << 35)
/**
* CUSE INIT request/reply flags
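A sketch of how a FUSE server might test the new capability from the INIT reply; since bit 35 lives above the 32-bit flags word, the high-flags handling assumes the FUSE_INIT_EXT/flags2 extended-init mechanism defined elsewhere in this header:

#include <stdint.h>
#include <linux/fuse.h>

static int kernel_has_expire_only(const struct fuse_init_out *out)
{
	uint64_t flags = out->flags;

	if (flags & FUSE_INIT_EXT)
		flags |= (uint64_t)out->flags2 << 32;

	return !!(flags & FUSE_HAS_EXPIRE_ONLY);
}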
diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h
index f55bc680b5b0..a246e11988d5 100644
--- a/include/uapi/linux/mman.h
+++ b/include/uapi/linux/mman.h
@@ -4,6 +4,7 @@
#include <asm/mman.h>
#include <asm-generic/hugetlb_encode.h>
+#include <linux/types.h>
#define MREMAP_MAYMOVE 1
#define MREMAP_FIXED 2
@@ -41,4 +42,17 @@
#define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
#define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
+struct cachestat_range {
+ __u64 off;
+ __u64 len;
+};
+
+struct cachestat {
+ __u64 nr_cache;
+ __u64 nr_dirty;
+ __u64 nr_writeback;
+ __u64 nr_evicted;
+ __u64 nr_recently_evicted;
+};
+
#endif /* _UAPI_LINUX_MMAN_H */
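Together with the syscall number added in unistd.h above, these structs are enough for a raw userspace call. A minimal sketch, assuming no libc wrapper yet and uapi headers new enough to carry the structs; len == 0 is understood here as "through end of file":

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mman.h>		/* struct cachestat_range, struct cachestat */

#ifndef __NR_cachestat
#define __NR_cachestat 451	/* matches the unistd.h hunk above */
#endif

int main(int argc, char **argv)
{
	struct cachestat_range range = { .off = 0, .len = 0 };
	struct cachestat cs;
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;
	if (syscall(__NR_cachestat, fd, &range, &cs, 0)) {
		perror("cachestat");
		return 1;
	}
	printf("cached=%llu dirty=%llu writeback=%llu evicted=%llu recently_evicted=%llu\n",
	       (unsigned long long)cs.nr_cache, (unsigned long long)cs.nr_dirty,
	       (unsigned long long)cs.nr_writeback, (unsigned long long)cs.nr_evicted,
	       (unsigned long long)cs.nr_recently_evicted);
	return 0;
}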
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 0aa955aa8246..6322823b5270 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -274,6 +274,7 @@ typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_INFO_DOUBLE 0x00000004 /* Double buffering needed for PCM start/stop */
#define SNDRV_PCM_INFO_BATCH 0x00000010 /* double buffering */
#define SNDRV_PCM_INFO_SYNC_APPLPTR 0x00000020 /* need the explicit sync of appl_ptr update */
+#define SNDRV_PCM_INFO_PERFECT_DRAIN 0x00000040 /* silencing at the end of stream is not required */
#define SNDRV_PCM_INFO_INTERLEAVED 0x00000100 /* channels are interleaved */
#define SNDRV_PCM_INFO_NONINTERLEAVED 0x00000200 /* channels are not interleaved */
#define SNDRV_PCM_INFO_COMPLEX 0x00000400 /* complex frame organization (mmap only) */
@@ -383,6 +384,9 @@ typedef int snd_pcm_hw_param_t;
#define SNDRV_PCM_HW_PARAMS_NORESAMPLE (1<<0) /* avoid rate resampling */
#define SNDRV_PCM_HW_PARAMS_EXPORT_BUFFER (1<<1) /* export buffer */
#define SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP (1<<2) /* disable period wakeups */
+#define SNDRV_PCM_HW_PARAMS_NO_DRAIN_SILENCE (1<<3) /* suppress drain with the filling
+ * of the silence samples
+ */
struct snd_interval {
unsigned int min, max;
diff --git a/init/Kconfig b/init/Kconfig
index 32c24950c4ce..f7f65af4ee12 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1771,6 +1771,16 @@ config RSEQ
If unsure, say Y.
+config CACHESTAT_SYSCALL
+ bool "Enable cachestat() system call" if EXPERT
+ default y
+ help
+ Enable the cachestat system call, which queries the page cache
+ statistics of a file (number of cached pages, dirty pages,
+ pages marked for writeback, (recently) evicted pages).
+
+	  If unsure, say Y here.
+
config DEBUG_RSEQ
default n
bool "Enabled debugging of rseq() system call" if EXPERT
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index ef313ecfb53a..29c1d3ae2a5c 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -14,6 +14,7 @@
#include <linux/ipc_namespace.h>
#include <linux/msg.h>
#include <linux/slab.h>
+#include <linux/cred.h>
#include "util.h"
static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
@@ -190,25 +191,57 @@ static int set_is_seen(struct ctl_table_set *set)
return &current->nsproxy->ipc_ns->ipc_set == set;
}
+static void ipc_set_ownership(struct ctl_table_header *head,
+ struct ctl_table *table,
+ kuid_t *uid, kgid_t *gid)
+{
+ struct ipc_namespace *ns =
+ container_of(head->set, struct ipc_namespace, ipc_set);
+
+ kuid_t ns_root_uid = make_kuid(ns->user_ns, 0);
+ kgid_t ns_root_gid = make_kgid(ns->user_ns, 0);
+
+ *uid = uid_valid(ns_root_uid) ? ns_root_uid : GLOBAL_ROOT_UID;
+ *gid = gid_valid(ns_root_gid) ? ns_root_gid : GLOBAL_ROOT_GID;
+}
+
static int ipc_permissions(struct ctl_table_header *head, struct ctl_table *table)
{
int mode = table->mode;
#ifdef CONFIG_CHECKPOINT_RESTORE
- struct ipc_namespace *ns = current->nsproxy->ipc_ns;
+ struct ipc_namespace *ns =
+ container_of(head->set, struct ipc_namespace, ipc_set);
if (((table->data == &ns->ids[IPC_SEM_IDS].next_id) ||
(table->data == &ns->ids[IPC_MSG_IDS].next_id) ||
(table->data == &ns->ids[IPC_SHM_IDS].next_id)) &&
checkpoint_restore_ns_capable(ns->user_ns))
mode = 0666;
+ else
#endif
- return mode;
+ {
+ kuid_t ns_root_uid;
+ kgid_t ns_root_gid;
+
+ ipc_set_ownership(head, table, &ns_root_uid, &ns_root_gid);
+
+ if (uid_eq(current_euid(), ns_root_uid))
+ mode >>= 6;
+
+ else if (in_egroup_p(ns_root_gid))
+ mode >>= 3;
+ }
+
+ mode &= 7;
+
+ return (mode << 6) | (mode << 3) | mode;
}
static struct ctl_table_root set_root = {
.lookup = set_lookup,
.permissions = ipc_permissions,
+ .set_ownership = ipc_set_ownership,
};
bool setup_ipc_sysctls(struct ipc_namespace *ns)
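To make the folding arithmetic in ipc_permissions() concrete: the shifts pick the caller's applicable permission class, the mask trims it to three bits, and the final expression replicates it into all three classes. A hypothetical standalone version:

/* shift: 6 for the ns-root owner case, 3 for group, 0 otherwise */
static int fold_mode(int mode, int shift)
{
	mode = (mode >> shift) & 7;
	return (mode << 6) | (mode << 3) | mode;
}

So a table registered as 0644 is presented as 0666 to the namespace's root user (fold_mode(0644, 6)) and as 0444 to anyone else (fold_mode(0644, 0)).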
diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
index fbf6a8b93a26..ce03930aced5 100644
--- a/ipc/mq_sysctl.c
+++ b/ipc/mq_sysctl.c
@@ -12,6 +12,7 @@
#include <linux/stat.h>
#include <linux/capability.h>
#include <linux/slab.h>
+#include <linux/cred.h>
static int msg_max_limit_min = MIN_MSGMAX;
static int msg_max_limit_max = HARD_MSGMAX;
@@ -76,8 +77,43 @@ static int set_is_seen(struct ctl_table_set *set)
return &current->nsproxy->ipc_ns->mq_set == set;
}
+static void mq_set_ownership(struct ctl_table_header *head,
+ struct ctl_table *table,
+ kuid_t *uid, kgid_t *gid)
+{
+ struct ipc_namespace *ns =
+ container_of(head->set, struct ipc_namespace, mq_set);
+
+ kuid_t ns_root_uid = make_kuid(ns->user_ns, 0);
+ kgid_t ns_root_gid = make_kgid(ns->user_ns, 0);
+
+ *uid = uid_valid(ns_root_uid) ? ns_root_uid : GLOBAL_ROOT_UID;
+ *gid = gid_valid(ns_root_gid) ? ns_root_gid : GLOBAL_ROOT_GID;
+}
+
+static int mq_permissions(struct ctl_table_header *head, struct ctl_table *table)
+{
+ int mode = table->mode;
+ kuid_t ns_root_uid;
+ kgid_t ns_root_gid;
+
+ mq_set_ownership(head, table, &ns_root_uid, &ns_root_gid);
+
+ if (uid_eq(current_euid(), ns_root_uid))
+ mode >>= 6;
+
+ else if (in_egroup_p(ns_root_gid))
+ mode >>= 3;
+
+ mode &= 7;
+
+ return (mode << 6) | (mode << 3) | mode;
+}
+
static struct ctl_table_root set_root = {
.lookup = set_lookup,
+ .permissions = mq_permissions,
+ .set_ownership = mq_set_ownership,
};
bool setup_mq_sysctls(struct ipc_namespace *ns)
diff --git a/ipc/shm.c b/ipc/shm.c
index 60e45e7045d4..576a543b7cff 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1662,7 +1662,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
goto invalid;
}
- addr = do_mmap(file, addr, size, prot, flags, 0, &populate, NULL);
+ addr = do_mmap(file, addr, size, prot, flags, 0, 0, &populate, NULL);
*raddr = addr;
err = 0;
if (IS_ERR_VALUE(addr))
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 8d368fa353f9..4ef4c4f8a355 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1423,7 +1423,7 @@ static const struct bpf_func_proto bpf_kptr_xchg_proto = {
#define DYNPTR_SIZE_MASK 0xFFFFFF
#define DYNPTR_RDONLY_BIT BIT(31)
-static bool bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
+static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{
return ptr->size & DYNPTR_RDONLY_BIT;
}
@@ -1443,11 +1443,18 @@ static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *pt
return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
}
-u32 bpf_dynptr_get_size(const struct bpf_dynptr_kern *ptr)
+u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
{
return ptr->size & DYNPTR_SIZE_MASK;
}
+static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
+{
+ u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;
+
+ ptr->size = new_size | metadata;
+}
+
int bpf_dynptr_check_size(u32 size)
{
return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
@@ -1469,7 +1476,7 @@ void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len)
{
- u32 size = bpf_dynptr_get_size(ptr);
+ u32 size = __bpf_dynptr_size(ptr);
if (len > size || offset > size - len)
return -E2BIG;
@@ -1563,7 +1570,7 @@ BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, v
enum bpf_dynptr_type type;
int err;
- if (!dst->data || bpf_dynptr_is_rdonly(dst))
+ if (!dst->data || __bpf_dynptr_is_rdonly(dst))
return -EINVAL;
err = bpf_dynptr_check_off_len(dst, offset, len);
@@ -1619,7 +1626,7 @@ BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u3
if (err)
return 0;
- if (bpf_dynptr_is_rdonly(ptr))
+ if (__bpf_dynptr_is_rdonly(ptr))
return 0;
type = bpf_dynptr_get_type(ptr);
@@ -2142,6 +2149,22 @@ __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
return NULL;
return cgrp;
}
+
+/**
+ * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc, test
+ * task's membership of cgroup ancestry.
+ * @task: the task to be tested
+ * @ancestor: possible ancestor of @task's cgroup
+ *
+ * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
+ * It follows all the same rules as cgroup_is_descendant, and only applies
+ * to the default hierarchy.
+ */
+__bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
+ struct cgroup *ancestor)
+{
+ return task_under_cgroup_hierarchy(task, ancestor);
+}
#endif /* CONFIG_CGROUPS */
/**
@@ -2167,13 +2190,15 @@ __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
* bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
* @ptr: The dynptr whose data slice to retrieve
* @offset: Offset into the dynptr
- * @buffer: User-provided buffer to copy contents into
- * @buffer__szk: Size (in bytes) of the buffer. This is the length of the
- * requested slice. This must be a constant.
+ * @buffer__opt: User-provided buffer to copy contents into. May be NULL
+ * @buffer__szk: Size (in bytes) of the buffer if present. This is the
+ * length of the requested slice. This must be a constant.
*
* For non-skb and non-xdp type dynptrs, there is no difference between
* bpf_dynptr_slice and bpf_dynptr_data.
*
+ * If buffer__opt is NULL, the call will fail if the buffer was needed.
+ *
* If the intention is to write to the data slice, please use
* bpf_dynptr_slice_rdwr.
*
@@ -2190,7 +2215,7 @@ __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
* direct pointer)
*/
__bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset,
- void *buffer, u32 buffer__szk)
+ void *buffer__opt, u32 buffer__szk)
{
enum bpf_dynptr_type type;
u32 len = buffer__szk;
@@ -2210,15 +2235,17 @@ __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset
case BPF_DYNPTR_TYPE_RINGBUF:
return ptr->data + ptr->offset + offset;
case BPF_DYNPTR_TYPE_SKB:
- return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer);
+ return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt);
case BPF_DYNPTR_TYPE_XDP:
{
void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);
if (xdp_ptr)
return xdp_ptr;
- bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer, len, false);
- return buffer;
+ if (!buffer__opt)
+ return NULL;
+ bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false);
+ return buffer__opt;
}
default:
WARN_ONCE(true, "unknown dynptr type %d\n", type);
@@ -2230,13 +2257,15 @@ __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset
* bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
* @ptr: The dynptr whose data slice to retrieve
* @offset: Offset into the dynptr
- * @buffer: User-provided buffer to copy contents into
- * @buffer__szk: Size (in bytes) of the buffer. This is the length of the
- * requested slice. This must be a constant.
+ * @buffer__opt: User-provided buffer to copy contents into. May be NULL
+ * @buffer__szk: Size (in bytes) of the buffer if present. This is the
+ * length of the requested slice. This must be a constant.
*
* For non-skb and non-xdp type dynptrs, there is no difference between
* bpf_dynptr_slice and bpf_dynptr_data.
*
+ * If buffer__opt is NULL, the call will fail if the buffer was needed.
+ *
* The returned pointer is writable and may point to either directly the dynptr
* data at the requested offset or to the buffer if unable to obtain a direct
* data pointer to (example: the requested slice is to the paged area of an skb
@@ -2267,9 +2296,9 @@ __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset
* direct pointer)
*/
__bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 offset,
- void *buffer, u32 buffer__szk)
+ void *buffer__opt, u32 buffer__szk)
{
- if (!ptr->data || bpf_dynptr_is_rdonly(ptr))
+ if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
return NULL;
/* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
@@ -2294,7 +2323,59 @@ __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 o
* will be copied out into the buffer and the user will need to call
* bpf_dynptr_write() to commit changes.
*/
- return bpf_dynptr_slice(ptr, offset, buffer, buffer__szk);
+ return bpf_dynptr_slice(ptr, offset, buffer__opt, buffer__szk);
+}
+
+__bpf_kfunc int bpf_dynptr_adjust(struct bpf_dynptr_kern *ptr, u32 start, u32 end)
+{
+ u32 size;
+
+ if (!ptr->data || start > end)
+ return -EINVAL;
+
+ size = __bpf_dynptr_size(ptr);
+
+ if (start > size || end > size)
+ return -ERANGE;
+
+ ptr->offset += start;
+ bpf_dynptr_set_size(ptr, end - start);
+
+ return 0;
+}
+
+__bpf_kfunc bool bpf_dynptr_is_null(struct bpf_dynptr_kern *ptr)
+{
+ return !ptr->data;
+}
+
+__bpf_kfunc bool bpf_dynptr_is_rdonly(struct bpf_dynptr_kern *ptr)
+{
+ if (!ptr->data)
+ return false;
+
+ return __bpf_dynptr_is_rdonly(ptr);
+}
+
+__bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
+{
+ if (!ptr->data)
+ return -EINVAL;
+
+ return __bpf_dynptr_size(ptr);
+}
+
+__bpf_kfunc int bpf_dynptr_clone(struct bpf_dynptr_kern *ptr,
+ struct bpf_dynptr_kern *clone__uninit)
+{
+ if (!ptr->data) {
+ bpf_dynptr_set_null(clone__uninit);
+ return -EINVAL;
+ }
+
+ *clone__uninit = *ptr;
+
+ return 0;
}
__bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
@@ -2341,6 +2422,7 @@ BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
#endif
BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_SET8_END(generic_btf_ids)
@@ -2369,6 +2451,11 @@ BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
+BTF_ID_FLAGS(func, bpf_dynptr_adjust)
+BTF_ID_FLAGS(func, bpf_dynptr_is_null)
+BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
+BTF_ID_FLAGS(func, bpf_dynptr_size)
+BTF_ID_FLAGS(func, bpf_dynptr_clone)
BTF_SET8_END(common_btf_ids)
static const struct btf_kfunc_id_set common_kfunc_set = {
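On the BPF side, the new kfuncs compose as in this hedged sketch; the extern declarations are assumptions mirroring the kernel signatures above and would normally come from a kfunc header such as bpf_kfuncs.h:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

extern int bpf_dynptr_adjust(struct bpf_dynptr *p, __u32 start, __u32 end) __ksym;
extern int bpf_dynptr_clone(struct bpf_dynptr *p, struct bpf_dynptr *clone) __ksym;
extern __u32 bpf_dynptr_size(const struct bpf_dynptr *p) __ksym;

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} rb SEC(".maps");

SEC("tp/syscalls/sys_enter_nanosleep")
int use_dynptr(void *ctx)
{
	struct bpf_dynptr ptr, clone;
	char byte = 0x7f;

	if (bpf_ringbuf_reserve_dynptr(&rb, 16, 0, &ptr))
		goto out;

	/* a clone shares the parent's ref_obj_id, so releasing the parent
	 * below also invalidates the clone (enforced by the verifier changes) */
	bpf_dynptr_clone(&ptr, &clone);

	/* shrink the clone's view to bytes [4, 12) without moving any data */
	if (!bpf_dynptr_adjust(&clone, 4, 12) && bpf_dynptr_size(&clone) == 8)
		bpf_dynptr_write(&clone, 0, &byte, 1, 0);
out:
	/* must be submitted or discarded on every path, even after failure */
	bpf_ringbuf_submit_dynptr(&ptr, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";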
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 14f39c1e573e..909c112ef537 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -5380,7 +5380,8 @@ static int bpf_unpriv_handler(struct ctl_table *table, int write,
*(int *)table->data = unpriv_enable;
}
- unpriv_ebpf_notify(unpriv_enable);
+ if (write)
+ unpriv_ebpf_notify(unpriv_enable);
return ret;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index fbcf5a4e2fcd..754129d41225 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -240,6 +240,12 @@ static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
(poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
+static bool bpf_helper_call(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_JMP | BPF_CALL) &&
+ insn->src_reg == 0;
+}
+
static bool bpf_pseudo_call(const struct bpf_insn *insn)
{
return insn->code == (BPF_JMP | BPF_CALL) &&
@@ -309,6 +315,7 @@ struct bpf_kfunc_call_arg_meta {
struct {
enum bpf_dynptr_type type;
u32 id;
+ u32 ref_obj_id;
} initialized_dynptr;
struct {
u8 spi;
@@ -468,6 +475,13 @@ static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
return rec;
}
+static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog)
+{
+ struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux;
+
+ return aux && aux[subprog].linkage == BTF_FUNC_GLOBAL;
+}
+
static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK);
@@ -515,6 +529,8 @@ static bool is_dynptr_ref_function(enum bpf_func_id func_id)
return func_id == BPF_FUNC_dynptr_data;
}
+static bool is_callback_calling_kfunc(u32 btf_id);
+
static bool is_callback_calling_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_for_each_map_elem ||
@@ -524,6 +540,11 @@ static bool is_callback_calling_function(enum bpf_func_id func_id)
func_id == BPF_FUNC_user_ringbuf_drain;
}
+static bool is_async_callback_calling_function(enum bpf_func_id func_id)
+{
+ return func_id == BPF_FUNC_timer_set_callback;
+}
+
static bool is_storage_get_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_sk_storage_get ||
@@ -604,9 +625,9 @@ static const char *reg_type_str(struct bpf_verifier_env *env,
type & PTR_TRUSTED ? "trusted_" : ""
);
- snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
+ snprintf(env->tmp_str_buf, TMP_STR_BUF_LEN, "%s%s%s",
prefix, str[base_type(type)], postfix);
- return env->type_str_buf;
+ return env->tmp_str_buf;
}
static char slot_type_char[] = {
@@ -847,11 +868,11 @@ static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
struct bpf_func_state *state, int spi);
static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
- enum bpf_arg_type arg_type, int insn_idx)
+ enum bpf_arg_type arg_type, int insn_idx, int clone_ref_obj_id)
{
struct bpf_func_state *state = func(env, reg);
enum bpf_dynptr_type type;
- int spi, i, id, err;
+ int spi, i, err;
spi = dynptr_get_spi(env, reg);
if (spi < 0)
@@ -887,7 +908,13 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
if (dynptr_type_refcounted(type)) {
/* The id is used to track proper releasing */
- id = acquire_reference_state(env, insn_idx);
+ int id;
+
+ if (clone_ref_obj_id)
+ id = clone_ref_obj_id;
+ else
+ id = acquire_reference_state(env, insn_idx);
+
if (id < 0)
return id;
@@ -901,24 +928,15 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
return 0;
}
-static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_func_state *state, int spi)
{
- struct bpf_func_state *state = func(env, reg);
- int spi, i;
-
- spi = dynptr_get_spi(env, reg);
- if (spi < 0)
- return spi;
+ int i;
for (i = 0; i < BPF_REG_SIZE; i++) {
state->stack[spi].slot_type[i] = STACK_INVALID;
state->stack[spi - 1].slot_type[i] = STACK_INVALID;
}
- /* Invalidate any slices associated with this dynptr */
- if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type))
- WARN_ON_ONCE(release_reference(env, state->stack[spi].spilled_ptr.ref_obj_id));
-
__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
@@ -945,6 +963,50 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re
*/
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
+}
+
+static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+ struct bpf_func_state *state = func(env, reg);
+ int spi, ref_obj_id, i;
+
+ spi = dynptr_get_spi(env, reg);
+ if (spi < 0)
+ return spi;
+
+ if (!dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
+ invalidate_dynptr(env, state, spi);
+ return 0;
+ }
+
+ ref_obj_id = state->stack[spi].spilled_ptr.ref_obj_id;
+
+ /* If the dynptr has a ref_obj_id, then we need to invalidate
+ * two things:
+ *
+ * 1) Any dynptrs with a matching ref_obj_id (clones)
+ * 2) Any slices derived from this dynptr.
+ */
+
+ /* Invalidate any slices associated with this dynptr */
+ WARN_ON_ONCE(release_reference(env, ref_obj_id));
+
+ /* Invalidate any dynptr clones */
+ for (i = 1; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+ if (state->stack[i].spilled_ptr.ref_obj_id != ref_obj_id)
+ continue;
+
+ /* it should always be the case that if the ref obj id
+ * matches then the stack slot also belongs to a
+ * dynptr
+ */
+ if (state->stack[i].slot_type[0] != STACK_DYNPTR) {
+ verbose(env, "verifier internal error: misconfigured ref_obj_id\n");
+ return -EFAULT;
+ }
+ if (state->stack[i].spilled_ptr.dynptr.first_slot)
+ invalidate_dynptr(env, state, i);
+ }
return 0;
}
@@ -1254,6 +1316,12 @@ static bool is_spilled_reg(const struct bpf_stack_state *stack)
return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
}
+static bool is_spilled_scalar_reg(const struct bpf_stack_state *stack)
+{
+ return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL &&
+ stack->spilled_ptr.type == SCALAR_VALUE;
+}
+
static void scrub_spilled_slot(u8 *stype)
{
if (*stype != STACK_INVALID)
@@ -3144,12 +3212,172 @@ static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
return btf_name_by_offset(desc_btf, func->name_off);
}
+static inline void bt_init(struct backtrack_state *bt, u32 frame)
+{
+ bt->frame = frame;
+}
+
+static inline void bt_reset(struct backtrack_state *bt)
+{
+ struct bpf_verifier_env *env = bt->env;
+
+ memset(bt, 0, sizeof(*bt));
+ bt->env = env;
+}
+
+static inline u32 bt_empty(struct backtrack_state *bt)
+{
+ u64 mask = 0;
+ int i;
+
+ for (i = 0; i <= bt->frame; i++)
+ mask |= bt->reg_masks[i] | bt->stack_masks[i];
+
+ return mask == 0;
+}
+
+static inline int bt_subprog_enter(struct backtrack_state *bt)
+{
+ if (bt->frame == MAX_CALL_FRAMES - 1) {
+ verbose(bt->env, "BUG subprog enter from frame %d\n", bt->frame);
+ WARN_ONCE(1, "verifier backtracking bug");
+ return -EFAULT;
+ }
+ bt->frame++;
+ return 0;
+}
+
+static inline int bt_subprog_exit(struct backtrack_state *bt)
+{
+ if (bt->frame == 0) {
+ verbose(bt->env, "BUG subprog exit from frame 0\n");
+ WARN_ONCE(1, "verifier backtracking bug");
+ return -EFAULT;
+ }
+ bt->frame--;
+ return 0;
+}
+
+static inline void bt_set_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
+{
+ bt->reg_masks[frame] |= 1 << reg;
+}
+
+static inline void bt_clear_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
+{
+ bt->reg_masks[frame] &= ~(1 << reg);
+}
+
+static inline void bt_set_reg(struct backtrack_state *bt, u32 reg)
+{
+ bt_set_frame_reg(bt, bt->frame, reg);
+}
+
+static inline void bt_clear_reg(struct backtrack_state *bt, u32 reg)
+{
+ bt_clear_frame_reg(bt, bt->frame, reg);
+}
+
+static inline void bt_set_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
+{
+ bt->stack_masks[frame] |= 1ull << slot;
+}
+
+static inline void bt_clear_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
+{
+ bt->stack_masks[frame] &= ~(1ull << slot);
+}
+
+static inline void bt_set_slot(struct backtrack_state *bt, u32 slot)
+{
+ bt_set_frame_slot(bt, bt->frame, slot);
+}
+
+static inline void bt_clear_slot(struct backtrack_state *bt, u32 slot)
+{
+ bt_clear_frame_slot(bt, bt->frame, slot);
+}
+
+static inline u32 bt_frame_reg_mask(struct backtrack_state *bt, u32 frame)
+{
+ return bt->reg_masks[frame];
+}
+
+static inline u32 bt_reg_mask(struct backtrack_state *bt)
+{
+ return bt->reg_masks[bt->frame];
+}
+
+static inline u64 bt_frame_stack_mask(struct backtrack_state *bt, u32 frame)
+{
+ return bt->stack_masks[frame];
+}
+
+static inline u64 bt_stack_mask(struct backtrack_state *bt)
+{
+ return bt->stack_masks[bt->frame];
+}
+
+static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg)
+{
+ return bt->reg_masks[bt->frame] & (1 << reg);
+}
+
+static inline bool bt_is_slot_set(struct backtrack_state *bt, u32 slot)
+{
+ return bt->stack_masks[bt->frame] & (1ull << slot);
+}
+
+/* format registers bitmask, e.g., "r0,r2,r4" for 0x15 mask */
+static void fmt_reg_mask(char *buf, ssize_t buf_sz, u32 reg_mask)
+{
+ DECLARE_BITMAP(mask, 64);
+ bool first = true;
+ int i, n;
+
+ buf[0] = '\0';
+
+ bitmap_from_u64(mask, reg_mask);
+ for_each_set_bit(i, mask, 32) {
+ n = snprintf(buf, buf_sz, "%sr%d", first ? "" : ",", i);
+ first = false;
+ buf += n;
+ buf_sz -= n;
+ if (buf_sz < 0)
+ break;
+ }
+}
+/* format stack slots bitmask, e.g., "-8,-24,-40" for 0x15 mask */
+static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask)
+{
+ DECLARE_BITMAP(mask, 64);
+ bool first = true;
+ int i, n;
+
+ buf[0] = '\0';
+
+ bitmap_from_u64(mask, stack_mask);
+ for_each_set_bit(i, mask, 64) {
+ n = snprintf(buf, buf_sz, "%s%d", first ? "" : ",", -(i + 1) * 8);
+ first = false;
+ buf += n;
+ buf_sz -= n;
+ if (buf_sz < 0)
+ break;
+ }
+}
+
/* For given verifier state backtrack_insn() is called from the last insn to
* the first insn. Its purpose is to compute a bitmask of registers and
 * stack slots that need precision in the parent verifier state.
+ *
+ * @idx is an index of the instruction we are currently processing;
+ * @subseq_idx is an index of the subsequent instruction that:
+ * - *would be* executed next, if jump history is viewed in forward order;
+ * - *was* processed previously during backtracking.
*/
-static int backtrack_insn(struct bpf_verifier_env *env, int idx,
- u32 *reg_mask, u64 *stack_mask)
+static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ struct backtrack_state *bt)
{
const struct bpf_insn_cbs cbs = {
.cb_call = disasm_kfunc_name,
@@ -3160,20 +3388,24 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
u8 class = BPF_CLASS(insn->code);
u8 opcode = BPF_OP(insn->code);
u8 mode = BPF_MODE(insn->code);
- u32 dreg = 1u << insn->dst_reg;
- u32 sreg = 1u << insn->src_reg;
- u32 spi;
+ u32 dreg = insn->dst_reg;
+ u32 sreg = insn->src_reg;
+ u32 spi, i;
if (insn->code == 0)
return 0;
if (env->log.level & BPF_LOG_LEVEL2) {
- verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
+ fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_reg_mask(bt));
+ verbose(env, "mark_precise: frame%d: regs=%s ",
+ bt->frame, env->tmp_str_buf);
+ fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt));
+ verbose(env, "stack=%s before ", env->tmp_str_buf);
verbose(env, "%d: ", idx);
print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
}
if (class == BPF_ALU || class == BPF_ALU64) {
- if (!(*reg_mask & dreg))
+ if (!bt_is_reg_set(bt, dreg))
return 0;
if (opcode == BPF_MOV) {
if (BPF_SRC(insn->code) == BPF_X) {
@@ -3181,8 +3413,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
* dreg needs precision after this insn
* sreg needs precision before this insn
*/
- *reg_mask &= ~dreg;
- *reg_mask |= sreg;
+ bt_clear_reg(bt, dreg);
+ bt_set_reg(bt, sreg);
} else {
/* dreg = K
* dreg needs precision after this insn.
@@ -3190,7 +3422,7 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
* as precise=true in this verifier state.
* No further markings in parent are necessary
*/
- *reg_mask &= ~dreg;
+ bt_clear_reg(bt, dreg);
}
} else {
if (BPF_SRC(insn->code) == BPF_X) {
@@ -3198,15 +3430,15 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
* both dreg and sreg need precision
* before this insn
*/
- *reg_mask |= sreg;
+ bt_set_reg(bt, sreg);
} /* else dreg += K
* dreg still needs precision before this insn
*/
}
} else if (class == BPF_LDX) {
- if (!(*reg_mask & dreg))
+ if (!bt_is_reg_set(bt, dreg))
return 0;
- *reg_mask &= ~dreg;
+ bt_clear_reg(bt, dreg);
/* scalars can only be spilled into stack w/o losing precision.
* Load from any other memory can be zero extended.
@@ -3227,9 +3459,9 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
WARN_ONCE(1, "verifier backtracking bug");
return -EFAULT;
}
- *stack_mask |= 1ull << spi;
+ bt_set_slot(bt, spi);
} else if (class == BPF_STX || class == BPF_ST) {
- if (*reg_mask & dreg)
+ if (bt_is_reg_set(bt, dreg))
/* stx & st shouldn't be using _scalar_ dst_reg
* to access memory. It means backtracking
* encountered a case of pointer subtraction.
@@ -3244,20 +3476,92 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
WARN_ONCE(1, "verifier backtracking bug");
return -EFAULT;
}
- if (!(*stack_mask & (1ull << spi)))
+ if (!bt_is_slot_set(bt, spi))
return 0;
- *stack_mask &= ~(1ull << spi);
+ bt_clear_slot(bt, spi);
if (class == BPF_STX)
- *reg_mask |= sreg;
+ bt_set_reg(bt, sreg);
} else if (class == BPF_JMP || class == BPF_JMP32) {
- if (opcode == BPF_CALL) {
- if (insn->src_reg == BPF_PSEUDO_CALL)
- return -ENOTSUPP;
- /* BPF helpers that invoke callback subprogs are
- * equivalent to BPF_PSEUDO_CALL above
+ if (bpf_pseudo_call(insn)) {
+ int subprog_insn_idx, subprog;
+
+ subprog_insn_idx = idx + insn->imm + 1;
+ subprog = find_subprog(env, subprog_insn_idx);
+ if (subprog < 0)
+ return -EFAULT;
+
+ if (subprog_is_global(env, subprog)) {
+ /* check that jump history doesn't have any
+ * extra instructions from subprog; the next
+ * instruction after call to global subprog
+ * should be literally next instruction in
+ * caller program
+ */
+ WARN_ONCE(idx + 1 != subseq_idx, "verifier backtracking bug");
+ /* r1-r5 are invalidated after subprog call,
+ * so for global func call it shouldn't be set
+ * anymore
+ */
+ if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
+ verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
+ WARN_ONCE(1, "verifier backtracking bug");
+ return -EFAULT;
+ }
+ /* global subprog always sets R0 */
+ bt_clear_reg(bt, BPF_REG_0);
+ return 0;
+ } else {
+ /* static subprog call instruction, which
+ * means that we are exiting current subprog,
+ * so only r1-r5 could be still requested as
+ * precise, r0 and r6-r10 or any stack slot in
+ * the current frame should be zero by now
+ */
+ if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
+ verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
+ WARN_ONCE(1, "verifier backtracking bug");
+ return -EFAULT;
+ }
+ /* we don't track register spills perfectly,
+ * so fallback to force-precise instead of failing */
+ if (bt_stack_mask(bt) != 0)
+ return -ENOTSUPP;
+ /* propagate r1-r5 to the caller */
+ for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
+ if (bt_is_reg_set(bt, i)) {
+ bt_clear_reg(bt, i);
+ bt_set_frame_reg(bt, bt->frame - 1, i);
+ }
+ }
+ if (bt_subprog_exit(bt))
+ return -EFAULT;
+ return 0;
+ }
+ } else if ((bpf_helper_call(insn) &&
+ is_callback_calling_function(insn->imm) &&
+ !is_async_callback_calling_function(insn->imm)) ||
+ (bpf_pseudo_kfunc_call(insn) && is_callback_calling_kfunc(insn->imm))) {
+ /* callback-calling helper or kfunc call, which means
+ * we are exiting from subprog, but unlike the subprog
+ * call handling above, we shouldn't propagate
+ * precision of r1-r5 (if any requested), as they are
+ * not actually arguments passed directly to callback
+ * subprogs
*/
- if (insn->src_reg == 0 && is_callback_calling_function(insn->imm))
+ if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
+ verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
+ WARN_ONCE(1, "verifier backtracking bug");
+ return -EFAULT;
+ }
+ if (bt_stack_mask(bt) != 0)
return -ENOTSUPP;
+ /* clear r1-r5 in callback subprog's mask */
+ for (i = BPF_REG_1; i <= BPF_REG_5; i++)
+ bt_clear_reg(bt, i);
+ if (bt_subprog_exit(bt))
+ return -EFAULT;
+ return 0;
+ } else if (opcode == BPF_CALL) {
/* kfunc with imm==0 is invalid and fixup_kfunc_call will
* catch this error later. Make backtracking conservative
* with ENOTSUPP.
@@ -3265,19 +3569,51 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
return -ENOTSUPP;
/* regular helper call sets R0 */
- *reg_mask &= ~1;
- if (*reg_mask & 0x3f) {
+ bt_clear_reg(bt, BPF_REG_0);
+ if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
/* if backtracing was looking for registers R1-R5
* they should have been found already.
*/
- verbose(env, "BUG regs %x\n", *reg_mask);
+ verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
WARN_ONCE(1, "verifier backtracking bug");
return -EFAULT;
}
} else if (opcode == BPF_EXIT) {
- return -ENOTSUPP;
+ bool r0_precise;
+
+ if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
+ /* if backtracing was looking for registers R1-R5
+ * they should have been found already.
+ */
+ verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
+ WARN_ONCE(1, "verifier backtracking bug");
+ return -EFAULT;
+ }
+
+ /* BPF_EXIT in subprog or callback always returns
+ * right after the call instruction, so by checking
+ * whether the instruction at subseq_idx-1 is subprog
+ * call or not we can distinguish actual exit from
+ * *subprog* from exit from *callback*. In the former
+ * case, we need to propagate r0 precision, if
+	 * necessary. In the latter case we never do that.
+ */
+ r0_precise = subseq_idx - 1 >= 0 &&
+ bpf_pseudo_call(&env->prog->insnsi[subseq_idx - 1]) &&
+ bt_is_reg_set(bt, BPF_REG_0);
+
+ bt_clear_reg(bt, BPF_REG_0);
+ if (bt_subprog_enter(bt))
+ return -EFAULT;
+
+ if (r0_precise)
+ bt_set_reg(bt, BPF_REG_0);
+ /* r6-r9 and stack slots will stay set in caller frame
+ * bitmasks until we return back from callee(s)
+ */
+ return 0;
} else if (BPF_SRC(insn->code) == BPF_X) {
- if (!(*reg_mask & (dreg | sreg)))
+ if (!bt_is_reg_set(bt, dreg) && !bt_is_reg_set(bt, sreg))
return 0;
/* dreg <cond> sreg
* Both dreg and sreg need precision before
@@ -3285,7 +3621,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
* before it would be equally necessary to
* propagate it to dreg.
*/
- *reg_mask |= (sreg | dreg);
+ bt_set_reg(bt, dreg);
+ bt_set_reg(bt, sreg);
/* else dreg <cond> K
* Only dreg still needs precision before
* this insn, so for the K-based conditional
@@ -3293,9 +3630,9 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
*/
}
} else if (class == BPF_LD) {
- if (!(*reg_mask & dreg))
+ if (!bt_is_reg_set(bt, dreg))
return 0;
- *reg_mask &= ~dreg;
+ bt_clear_reg(bt, dreg);
/* It's ld_imm64 or ld_abs or ld_ind.
* For ld_imm64 no further tracking of precision
* into parent is necessary
@@ -3366,6 +3703,11 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
struct bpf_reg_state *reg;
int i, j;
+ if (env->log.level & BPF_LOG_LEVEL2) {
+ verbose(env, "mark_precise: frame%d: falling back to forcing all scalars precise\n",
+ st->curframe);
+ }
+
/* big hammer: mark all scalars precise in this path.
* pop_stack may still get !precise scalars.
* We also skip current state and go straight to first parent state,
@@ -3377,17 +3719,25 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env,
func = st->frame[i];
for (j = 0; j < BPF_REG_FP; j++) {
reg = &func->regs[j];
- if (reg->type != SCALAR_VALUE)
+ if (reg->type != SCALAR_VALUE || reg->precise)
continue;
reg->precise = true;
+ if (env->log.level & BPF_LOG_LEVEL2) {
+ verbose(env, "force_precise: frame%d: forcing r%d to be precise\n",
+ i, j);
+ }
}
for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
if (!is_spilled_reg(&func->stack[j]))
continue;
reg = &func->stack[j].spilled_ptr;
- if (reg->type != SCALAR_VALUE)
+ if (reg->type != SCALAR_VALUE || reg->precise)
continue;
reg->precise = true;
+ if (env->log.level & BPF_LOG_LEVEL2) {
+ verbose(env, "force_precise: frame%d: forcing fp%d to be precise\n",
+ i, -(j + 1) * 8);
+ }
}
}
}
@@ -3505,62 +3855,48 @@ static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_
* mark_all_scalars_imprecise() to hopefully get more permissive and generic
* finalized states which help in short circuiting more future states.
*/
-static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno,
- int spi)
+static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
{
+ struct backtrack_state *bt = &env->bt;
struct bpf_verifier_state *st = env->cur_state;
int first_idx = st->first_insn_idx;
int last_idx = env->insn_idx;
struct bpf_func_state *func;
struct bpf_reg_state *reg;
- u32 reg_mask = regno >= 0 ? 1u << regno : 0;
- u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
bool skip_first = true;
- bool new_marks = false;
- int i, err;
+ int i, prev_i, fr, err;
if (!env->bpf_capable)
return 0;
+ /* set frame number from which we are starting to backtrack */
+ bt_init(bt, env->cur_state->curframe);
+
/* Do sanity checks against current state of register and/or stack
* slot, but don't set precise flag in current state, as precision
* tracking in the current state is unnecessary.
*/
- func = st->frame[frame];
+ func = st->frame[bt->frame];
if (regno >= 0) {
reg = &func->regs[regno];
if (reg->type != SCALAR_VALUE) {
WARN_ONCE(1, "backtracing misuse");
return -EFAULT;
}
- new_marks = true;
- }
-
- while (spi >= 0) {
- if (!is_spilled_reg(&func->stack[spi])) {
- stack_mask = 0;
- break;
- }
- reg = &func->stack[spi].spilled_ptr;
- if (reg->type != SCALAR_VALUE) {
- stack_mask = 0;
- break;
- }
- new_marks = true;
- break;
+ bt_set_reg(bt, regno);
}
- if (!new_marks)
- return 0;
- if (!reg_mask && !stack_mask)
+ if (bt_empty(bt))
return 0;
for (;;) {
DECLARE_BITMAP(mask, 64);
u32 history = st->jmp_history_cnt;
- if (env->log.level & BPF_LOG_LEVEL2)
- verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
+ if (env->log.level & BPF_LOG_LEVEL2) {
+ verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d\n",
+ bt->frame, last_idx, first_idx);
+ }
if (last_idx < 0) {
/* we are at the entry into subprog, which
@@ -3571,12 +3907,13 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
if (st->curframe == 0 &&
st->frame[0]->subprogno > 0 &&
st->frame[0]->callsite == BPF_MAIN_FUNC &&
- stack_mask == 0 && (reg_mask & ~0x3e) == 0) {
- bitmap_from_u64(mask, reg_mask);
+ bt_stack_mask(bt) == 0 &&
+ (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) == 0) {
+ bitmap_from_u64(mask, bt_reg_mask(bt));
for_each_set_bit(i, mask, 32) {
reg = &st->frame[0]->regs[i];
if (reg->type != SCALAR_VALUE) {
- reg_mask &= ~(1u << i);
+ bt_clear_reg(bt, i);
continue;
}
reg->precise = true;
@@ -3584,26 +3921,27 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
return 0;
}
- verbose(env, "BUG backtracing func entry subprog %d reg_mask %x stack_mask %llx\n",
- st->frame[0]->subprogno, reg_mask, stack_mask);
+ verbose(env, "BUG backtracking func entry subprog %d reg_mask %x stack_mask %llx\n",
+ st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt));
WARN_ONCE(1, "verifier backtracking bug");
return -EFAULT;
}
- for (i = last_idx;;) {
+ for (i = last_idx, prev_i = -1;;) {
if (skip_first) {
err = 0;
skip_first = false;
} else {
- err = backtrack_insn(env, i, &reg_mask, &stack_mask);
+ err = backtrack_insn(env, i, prev_i, bt);
}
if (err == -ENOTSUPP) {
- mark_all_scalars_precise(env, st);
+ mark_all_scalars_precise(env, env->cur_state);
+ bt_reset(bt);
return 0;
} else if (err) {
return err;
}
- if (!reg_mask && !stack_mask)
+ if (bt_empty(bt))
/* Found assignment(s) into tracked register in this state.
* Since this state is already marked, just return.
* Nothing to be tracked further in the parent state.
@@ -3611,6 +3949,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
return 0;
if (i == first_idx)
break;
+ prev_i = i;
i = get_prev_insn_idx(st, i, &history);
if (i >= env->prog->len) {
/* This can happen if backtracking reached insn 0
@@ -3628,84 +3967,94 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r
if (!st)
break;
- new_marks = false;
- func = st->frame[frame];
- bitmap_from_u64(mask, reg_mask);
- for_each_set_bit(i, mask, 32) {
- reg = &func->regs[i];
- if (reg->type != SCALAR_VALUE) {
- reg_mask &= ~(1u << i);
- continue;
- }
- if (!reg->precise)
- new_marks = true;
- reg->precise = true;
- }
-
- bitmap_from_u64(mask, stack_mask);
- for_each_set_bit(i, mask, 64) {
- if (i >= func->allocated_stack / BPF_REG_SIZE) {
- /* the sequence of instructions:
- * 2: (bf) r3 = r10
- * 3: (7b) *(u64 *)(r3 -8) = r0
- * 4: (79) r4 = *(u64 *)(r10 -8)
- * doesn't contain jmps. It's backtracked
- * as a single block.
- * During backtracking insn 3 is not recognized as
- * stack access, so at the end of backtracking
- * stack slot fp-8 is still marked in stack_mask.
- * However the parent state may not have accessed
- * fp-8 and it's "unallocated" stack space.
- * In such case fallback to conservative.
- */
- mark_all_scalars_precise(env, st);
- return 0;
+ for (fr = bt->frame; fr >= 0; fr--) {
+ func = st->frame[fr];
+ bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr));
+ for_each_set_bit(i, mask, 32) {
+ reg = &func->regs[i];
+ if (reg->type != SCALAR_VALUE) {
+ bt_clear_frame_reg(bt, fr, i);
+ continue;
+ }
+ if (reg->precise)
+ bt_clear_frame_reg(bt, fr, i);
+ else
+ reg->precise = true;
}
- if (!is_spilled_reg(&func->stack[i])) {
- stack_mask &= ~(1ull << i);
- continue;
+ bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr));
+ for_each_set_bit(i, mask, 64) {
+ if (i >= func->allocated_stack / BPF_REG_SIZE) {
+ /* the sequence of instructions:
+ * 2: (bf) r3 = r10
+ * 3: (7b) *(u64 *)(r3 -8) = r0
+ * 4: (79) r4 = *(u64 *)(r10 -8)
+ * doesn't contain jmps. It's backtracked
+ * as a single block.
+ * During backtracking insn 3 is not recognized as
+ * stack access, so at the end of backtracking
+ * stack slot fp-8 is still marked in stack_mask.
+ * However the parent state may not have accessed
+ * fp-8 and it's "unallocated" stack space.
+ * In such case fallback to conservative.
+ */
+ mark_all_scalars_precise(env, env->cur_state);
+ bt_reset(bt);
+ return 0;
+ }
+
+ if (!is_spilled_scalar_reg(&func->stack[i])) {
+ bt_clear_frame_slot(bt, fr, i);
+ continue;
+ }
+ reg = &func->stack[i].spilled_ptr;
+ if (reg->precise)
+ bt_clear_frame_slot(bt, fr, i);
+ else
+ reg->precise = true;
}
- reg = &func->stack[i].spilled_ptr;
- if (reg->type != SCALAR_VALUE) {
- stack_mask &= ~(1ull << i);
- continue;
+ if (env->log.level & BPF_LOG_LEVEL2) {
+ fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN,
+ bt_frame_reg_mask(bt, fr));
+ verbose(env, "mark_precise: frame%d: parent state regs=%s ",
+ fr, env->tmp_str_buf);
+ fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN,
+ bt_frame_stack_mask(bt, fr));
+ verbose(env, "stack=%s: ", env->tmp_str_buf);
+ print_verifier_state(env, func, true);
}
- if (!reg->precise)
- new_marks = true;
- reg->precise = true;
- }
- if (env->log.level & BPF_LOG_LEVEL2) {
- verbose(env, "parent %s regs=%x stack=%llx marks:",
- new_marks ? "didn't have" : "already had",
- reg_mask, stack_mask);
- print_verifier_state(env, func, true);
}
- if (!reg_mask && !stack_mask)
- break;
- if (!new_marks)
- break;
+ if (bt_empty(bt))
+ return 0;
last_idx = st->last_insn_idx;
first_idx = st->first_insn_idx;
}
+
+ /* if we still have requested precise regs or slots, we missed
+ * something (e.g., stack access through non-r10 register), so
+ * fallback to marking all precise
+ */
+ if (!bt_empty(bt)) {
+ mark_all_scalars_precise(env, env->cur_state);
+ bt_reset(bt);
+ }
+
return 0;
}
int mark_chain_precision(struct bpf_verifier_env *env, int regno)
{
- return __mark_chain_precision(env, env->cur_state->curframe, regno, -1);
-}
-
-static int mark_chain_precision_frame(struct bpf_verifier_env *env, int frame, int regno)
-{
- return __mark_chain_precision(env, frame, regno, -1);
+ return __mark_chain_precision(env, regno);
}
-static int mark_chain_precision_stack_frame(struct bpf_verifier_env *env, int frame, int spi)
+/* mark_chain_precision_batch() assumes that env->bt is set in the caller to
+ * desired reg and stack masks across all relevant frames
+ */
+static int mark_chain_precision_batch(struct bpf_verifier_env *env)
{
- return __mark_chain_precision(env, frame, -1, spi);
+ return __mark_chain_precision(env, -1);
}
static bool is_spillable_regtype(enum bpf_reg_type type)
@@ -4067,6 +4416,7 @@ static void mark_reg_stack_read(struct bpf_verifier_env *env,
for (i = min_off; i < max_off; i++) {
slot = -i - 1;
spi = slot / BPF_REG_SIZE;
+ mark_stack_slot_scratched(env, spi);
stype = ptr_state->stack[spi].slot_type;
if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
break;
@@ -4118,6 +4468,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
stype = reg_state->stack[spi].slot_type;
reg = &reg_state->stack[spi].spilled_ptr;
+ mark_stack_slot_scratched(env, spi);
+
if (is_spilled_reg(&reg_state->stack[spi])) {
u8 spill_size = 1;
@@ -6677,7 +7029,7 @@ static int process_kptr_func(struct bpf_verifier_env *env, int regno,
* type, and declare it as 'const struct bpf_dynptr *' in their prototype.
*/
static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn_idx,
- enum bpf_arg_type arg_type)
+ enum bpf_arg_type arg_type, int clone_ref_obj_id)
{
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
int err;
@@ -6721,7 +7073,7 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn
return err;
}
- err = mark_stack_slots_dynptr(env, reg, arg_type, insn_idx);
+ err = mark_stack_slots_dynptr(env, reg, arg_type, insn_idx, clone_ref_obj_id);
} else /* MEM_RDONLY and None case from above */ {
/* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */
if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) {
@@ -7143,12 +7495,16 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
* ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
* but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL
*
+ * ARG_PTR_TO_MEM is compatible with PTR_TO_MEM that is tagged with a dynptr type.
+ *
* Therefore we fold these flags depending on the arg_type before comparison.
*/
if (arg_type & MEM_RDONLY)
type &= ~MEM_RDONLY;
if (arg_type & PTR_MAYBE_NULL)
type &= ~PTR_MAYBE_NULL;
+ if (base_type(arg_type) == ARG_PTR_TO_MEM)
+ type &= ~DYNPTR_TYPE_FLAG_MASK;
if (meta->func_id == BPF_FUNC_kptr_xchg && type & MEM_ALLOC)
type &= ~MEM_ALLOC;
@@ -7631,7 +7987,7 @@ skip_type_check:
err = check_mem_size_reg(env, reg, regno, true, meta);
break;
case ARG_PTR_TO_DYNPTR:
- err = process_dynptr_func(env, regno, insn_idx, arg_type);
+ err = process_dynptr_func(env, regno, insn_idx, arg_type, 0);
if (err)
return err;
break;
@@ -8178,17 +8534,13 @@ static int set_callee_state(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee, int insn_idx);
-static bool is_callback_calling_kfunc(u32 btf_id);
-
static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx, int subprog,
set_callee_state_fn set_callee_state_cb)
{
struct bpf_verifier_state *state = env->cur_state;
- struct bpf_func_info_aux *func_info_aux;
struct bpf_func_state *caller, *callee;
int err;
- bool is_global = false;
if (state->curframe + 1 >= MAX_CALL_FRAMES) {
verbose(env, "the call stack of %d frames is too deep\n",
@@ -8203,13 +8555,10 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
return -EFAULT;
}
- func_info_aux = env->prog->aux->func_info_aux;
- if (func_info_aux)
- is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
err = btf_check_subprog_call(env, subprog, caller->regs);
if (err == -EFAULT)
return err;
- if (is_global) {
+ if (subprog_is_global(env, subprog)) {
if (err) {
verbose(env, "Caller passes invalid args into func#%d\n",
subprog);
@@ -9398,6 +9747,11 @@ static bool is_kfunc_arg_const_mem_size(const struct btf *btf,
return __kfunc_param_match_suffix(btf, arg, "__szk");
}
+static bool is_kfunc_arg_optional(const struct btf *btf, const struct btf_param *arg)
+{
+ return __kfunc_param_match_suffix(btf, arg, "__opt");
+}
+
static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg)
{
return __kfunc_param_match_suffix(btf, arg, "__k");
@@ -9595,6 +9949,7 @@ enum special_kfunc_type {
KF_bpf_dynptr_from_xdp,
KF_bpf_dynptr_slice,
KF_bpf_dynptr_slice_rdwr,
+ KF_bpf_dynptr_clone,
};
BTF_SET_START(special_kfunc_set)
@@ -9614,6 +9969,7 @@ BTF_ID(func, bpf_dynptr_from_skb)
BTF_ID(func, bpf_dynptr_from_xdp)
BTF_ID(func, bpf_dynptr_slice)
BTF_ID(func, bpf_dynptr_slice_rdwr)
+BTF_ID(func, bpf_dynptr_clone)
BTF_SET_END(special_kfunc_set)
BTF_ID_LIST(special_kfunc_list)
@@ -9635,6 +9991,7 @@ BTF_ID(func, bpf_dynptr_from_skb)
BTF_ID(func, bpf_dynptr_from_xdp)
BTF_ID(func, bpf_dynptr_slice)
BTF_ID(func, bpf_dynptr_slice_rdwr)
+BTF_ID(func, bpf_dynptr_clone)
static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta)
{
@@ -10330,6 +10687,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
case KF_ARG_PTR_TO_DYNPTR:
{
enum bpf_arg_type dynptr_arg_type = ARG_PTR_TO_DYNPTR;
+ int clone_ref_obj_id = 0;
if (reg->type != PTR_TO_STACK &&
reg->type != CONST_PTR_TO_DYNPTR) {
@@ -10343,12 +10701,28 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
if (is_kfunc_arg_uninit(btf, &args[i]))
dynptr_arg_type |= MEM_UNINIT;
- if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb])
+ if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
dynptr_arg_type |= DYNPTR_TYPE_SKB;
- else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp])
+ } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp]) {
dynptr_arg_type |= DYNPTR_TYPE_XDP;
+ } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_clone] &&
+ (dynptr_arg_type & MEM_UNINIT)) {
+ enum bpf_dynptr_type parent_type = meta->initialized_dynptr.type;
- ret = process_dynptr_func(env, regno, insn_idx, dynptr_arg_type);
+ if (parent_type == BPF_DYNPTR_TYPE_INVALID) {
+ verbose(env, "verifier internal error: no dynptr type for parent of clone\n");
+ return -EFAULT;
+ }
+
+ dynptr_arg_type |= (unsigned int)get_dynptr_type_flag(parent_type);
+ clone_ref_obj_id = meta->initialized_dynptr.ref_obj_id;
+ if (dynptr_type_refcounted(parent_type) && !clone_ref_obj_id) {
+ verbose(env, "verifier internal error: missing ref obj id for parent of clone\n");
+ return -EFAULT;
+ }
+ }
+
+ ret = process_dynptr_func(env, regno, insn_idx, dynptr_arg_type, clone_ref_obj_id);
if (ret < 0)
return ret;
@@ -10361,6 +10735,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
}
meta->initialized_dynptr.id = id;
meta->initialized_dynptr.type = dynptr_get_type(env, reg);
+ meta->initialized_dynptr.ref_obj_id = dynptr_ref_obj_id(env, reg);
}
break;
@@ -10464,13 +10839,17 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
break;
case KF_ARG_PTR_TO_MEM_SIZE:
{
+ struct bpf_reg_state *buff_reg = &regs[regno];
+ const struct btf_param *buff_arg = &args[i];
struct bpf_reg_state *size_reg = &regs[regno + 1];
const struct btf_param *size_arg = &args[i + 1];
- ret = check_kfunc_mem_size_reg(env, size_reg, regno + 1);
- if (ret < 0) {
- verbose(env, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i, i + 1);
- return ret;
+ if (!register_is_null(buff_reg) || !is_kfunc_arg_optional(meta->btf, buff_arg)) {
+ ret = check_kfunc_mem_size_reg(env, size_reg, regno + 1);
+ if (ret < 0) {
+ verbose(env, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i, i + 1);
+ return ret;
+ }
}
if (is_kfunc_arg_const_mem_size(meta->btf, size_arg, size_reg)) {
@@ -15118,20 +15497,25 @@ static int propagate_precision(struct bpf_verifier_env *env,
struct bpf_reg_state *state_reg;
struct bpf_func_state *state;
int i, err = 0, fr;
+ bool first;
for (fr = old->curframe; fr >= 0; fr--) {
state = old->frame[fr];
state_reg = state->regs;
+ first = true;
for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
if (state_reg->type != SCALAR_VALUE ||
!state_reg->precise ||
!(state_reg->live & REG_LIVE_READ))
continue;
- if (env->log.level & BPF_LOG_LEVEL2)
- verbose(env, "frame %d: propagating r%d\n", fr, i);
- err = mark_chain_precision_frame(env, fr, i);
- if (err < 0)
- return err;
+ if (env->log.level & BPF_LOG_LEVEL2) {
+ if (first)
+ verbose(env, "frame %d: propagating r%d", fr, i);
+ else
+ verbose(env, ",r%d", i);
+ }
+ bt_set_frame_reg(&env->bt, fr, i);
+ first = false;
}
for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
@@ -15142,14 +15526,24 @@ static int propagate_precision(struct bpf_verifier_env *env,
!state_reg->precise ||
!(state_reg->live & REG_LIVE_READ))
continue;
- if (env->log.level & BPF_LOG_LEVEL2)
- verbose(env, "frame %d: propagating fp%d\n",
- fr, (-i - 1) * BPF_REG_SIZE);
- err = mark_chain_precision_stack_frame(env, fr, i);
- if (err < 0)
- return err;
+ if (env->log.level & BPF_LOG_LEVEL2) {
+ if (first)
+ verbose(env, "frame %d: propagating fp%d",
+ fr, (-i - 1) * BPF_REG_SIZE);
+ else
+ verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE);
+ }
+ bt_set_frame_slot(&env->bt, fr, i);
+ first = false;
}
+ if (!first)
+ verbose(env, "\n");
}
+
+ err = mark_chain_precision_batch(env);
+ if (err < 0)
+ return err;
+
return 0;
}
@@ -18806,6 +19200,8 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
if (!env)
return -ENOMEM;
+ env->bt.env = env;
+
len = (*prog)->len;
env->insn_aux_data =
vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
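The verifier hunks above surface two user-visible pieces: the bpf_dynptr_clone() kfunc (the clone inherits the parent dynptr's type and, for refcounted dynptrs, its ref_obj_id) and the new __opt argument suffix, which lets the buffer of a mem/size kfunc argument pair such as bpf_dynptr_slice()'s be NULL. A minimal usage sketch, assuming the kfunc declarations the BPF selftests carry in bpf_kfuncs.h:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_kfuncs.h"

SEC("xdp")
int dynptr_clone_example(struct xdp_md *ctx)
{
	struct bpf_dynptr ptr, clone;
	void *data;

	if (bpf_dynptr_from_xdp(ctx, 0, &ptr))
		return XDP_PASS;

	/* The clone shares the parent's type and ref_obj_id, which is
	 * exactly what the verifier changes above track and validate. */
	if (bpf_dynptr_clone(&ptr, &clone))
		return XDP_PASS;

	/* With the __opt suffix the scratch buffer may be NULL; the
	 * slice then only succeeds when the bytes are contiguous. */
	data = bpf_dynptr_slice(&clone, 0, NULL, 4);
	if (!data)
		return XDP_DROP;

	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";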
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 625d7483951c..9d809191a54f 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -57,6 +57,7 @@
#include <linux/file.h>
#include <linux/fs_parser.h>
#include <linux/sched/cputime.h>
+#include <linux/sched/deadline.h>
#include <linux/psi.h>
#include <net/sock.h>
@@ -6683,6 +6684,9 @@ void cgroup_exit(struct task_struct *tsk)
list_add_tail(&tsk->cg_list, &cset->dying_tasks);
cset->nr_tasks--;
+ if (dl_task(tsk))
+ dec_dl_tasks_cs(tsk);
+
WARN_ON_ONCE(cgroup_task_frozen(tsk));
if (unlikely(!(tsk->flags & PF_KTHREAD) &&
test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags)))
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index e4ca2dd2b764..2c76fcd9f0bc 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -193,6 +193,14 @@ struct cpuset {
int use_parent_ecpus;
int child_ecpus_count;
+ /*
+ * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
+ * know when to rebuild associated root domain bandwidth information.
+ */
+ int nr_deadline_tasks;
+ int nr_migrate_dl_tasks;
+ u64 sum_migrate_dl_bw;
+
/* Invalid partition error code, not lock protected */
enum prs_errcode prs_err;
@@ -245,6 +253,20 @@ static inline struct cpuset *parent_cs(struct cpuset *cs)
return css_cs(cs->css.parent);
}
+void inc_dl_tasks_cs(struct task_struct *p)
+{
+ struct cpuset *cs = task_cs(p);
+
+ cs->nr_deadline_tasks++;
+}
+
+void dec_dl_tasks_cs(struct task_struct *p)
+{
+ struct cpuset *cs = task_cs(p);
+
+ cs->nr_deadline_tasks--;
+}
+
/* bits in struct cpuset flags field */
typedef enum {
CS_ONLINE,
@@ -366,22 +388,23 @@ static struct cpuset top_cpuset = {
if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
/*
- * There are two global locks guarding cpuset structures - cpuset_rwsem and
+ * There are two global locks guarding cpuset structures - cpuset_mutex and
* callback_lock. We also require taking task_lock() when dereferencing a
* task's cpuset pointer. See "The task_lock() exception", at the end of this
- * comment. The cpuset code uses only cpuset_rwsem write lock. Other
- * kernel subsystems can use cpuset_read_lock()/cpuset_read_unlock() to
- * prevent change to cpuset structures.
+ * comment. The cpuset code uses only cpuset_mutex. Other kernel subsystems
+ * can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
+ * structures. Note that cpuset_mutex needs to be a mutex as it is used in
+ * paths that rely on priority inheritance (e.g. scheduler - on RT) for
+ * correctness.
*
* A task must hold both locks to modify cpusets. If a task holds
- * cpuset_rwsem, it blocks others wanting that rwsem, ensuring that it
- * is the only task able to also acquire callback_lock and be able to
- * modify cpusets. It can perform various checks on the cpuset structure
- * first, knowing nothing will change. It can also allocate memory while
- * just holding cpuset_rwsem. While it is performing these checks, various
- * callback routines can briefly acquire callback_lock to query cpusets.
- * Once it is ready to make the changes, it takes callback_lock, blocking
- * everyone else.
+ * cpuset_mutex, it blocks others, ensuring that it is the only task able to
+ * also acquire callback_lock and be able to modify cpusets. It can perform
+ * various checks on the cpuset structure first, knowing nothing will change.
+ * It can also allocate memory while just holding cpuset_mutex. While it is
+ * performing these checks, various callback routines can briefly acquire
+ * callback_lock to query cpusets. Once it is ready to make the changes, it
+ * takes callback_lock, blocking everyone else.
*
* Calls to the kernel memory allocator can not be made while holding
* callback_lock, as that would risk double tripping on callback_lock
@@ -403,16 +426,16 @@ static struct cpuset top_cpuset = {
* guidelines for accessing subsystem state in kernel/cgroup.c
*/
-DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);
+static DEFINE_MUTEX(cpuset_mutex);
-void cpuset_read_lock(void)
+void cpuset_lock(void)
{
- percpu_down_read(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
}
-void cpuset_read_unlock(void)
+void cpuset_unlock(void)
{
- percpu_up_read(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
}
static DEFINE_SPINLOCK(callback_lock);
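With cpuset_lock()/cpuset_unlock() reduced to a plain mutex, an outside subsystem can pin the cpuset configuration in preemptible context and get priority inheritance on RT, which the old percpu-rwsem could not offer. A sketch of the caller pattern (the sched/core.c hunk below uses it for SCHED_DEADLINE policy changes):

#include <linux/cpuset.h>

/* Illustrative only: a section that must observe a stable cpuset view. */
static void stable_cpuset_section_example(void)
{
	cpuset_lock();
	/* No cpuset can be reconfigured here; DL bandwidth data is stable. */
	cpuset_unlock();
}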
@@ -496,7 +519,7 @@ static inline bool partition_is_populated(struct cpuset *cs,
* One way or another, we guarantee to return some non-empty subset
* of cpu_online_mask.
*
- * Call with callback_lock or cpuset_rwsem held.
+ * Call with callback_lock or cpuset_mutex held.
*/
static void guarantee_online_cpus(struct task_struct *tsk,
struct cpumask *pmask)
@@ -538,7 +561,7 @@ out_unlock:
* One way or another, we guarantee to return some non-empty subset
* of node_states[N_MEMORY].
*
- * Call with callback_lock or cpuset_rwsem held.
+ * Call with callback_lock or cpuset_mutex held.
*/
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
@@ -550,7 +573,7 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
/*
* update task's spread flag if cpuset's page/slab spread flag is set
*
- * Call with callback_lock or cpuset_rwsem held. The check can be skipped
+ * Call with callback_lock or cpuset_mutex held. The check can be skipped
* if on default hierarchy.
*/
static void cpuset_update_task_spread_flags(struct cpuset *cs,
@@ -575,7 +598,7 @@ static void cpuset_update_task_spread_flags(struct cpuset *cs,
*
* One cpuset is a subset of another if all its allowed CPUs and
* Memory Nodes are a subset of the other, and its exclusive flags
- * are only set if the other's are set. Call holding cpuset_rwsem.
+ * are only set if the other's are set. Call holding cpuset_mutex.
*/
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
@@ -713,7 +736,7 @@ out:
* If we replaced the flag and mask values of the current cpuset
* (cur) with those values in the trial cpuset (trial), would
* our various subset and exclusive rules still be valid? Presumes
- * cpuset_rwsem held.
+ * cpuset_mutex held.
*
* 'cur' is the address of an actual, in-use cpuset. Operations
* such as list traversal that depend on the actual address of the
@@ -829,7 +852,7 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
rcu_read_unlock();
}
-/* Must be called with cpuset_rwsem held. */
+/* Must be called with cpuset_mutex held. */
static inline int nr_cpusets(void)
{
/* jump label reference count + the top-level cpuset */
@@ -855,7 +878,7 @@ static inline int nr_cpusets(void)
* domains when operating in the severe memory shortage situations
* that could cause allocation failures below.
*
- * Must be called with cpuset_rwsem held.
+ * Must be called with cpuset_mutex held.
*
* The three key local variables below are:
* cp - cpuset pointer, used (together with pos_css) to perform a
@@ -1066,11 +1089,14 @@ done:
return ndoms;
}
-static void update_tasks_root_domain(struct cpuset *cs)
+static void dl_update_tasks_root_domain(struct cpuset *cs)
{
struct css_task_iter it;
struct task_struct *task;
+ if (cs->nr_deadline_tasks == 0)
+ return;
+
css_task_iter_start(&cs->css, 0, &it);
while ((task = css_task_iter_next(&it)))
@@ -1079,12 +1105,12 @@ static void update_tasks_root_domain(struct cpuset *cs)
css_task_iter_end(&it);
}
-static void rebuild_root_domains(void)
+static void dl_rebuild_rd_accounting(void)
{
struct cpuset *cs = NULL;
struct cgroup_subsys_state *pos_css;
- percpu_rwsem_assert_held(&cpuset_rwsem);
+ lockdep_assert_held(&cpuset_mutex);
lockdep_assert_cpus_held();
lockdep_assert_held(&sched_domains_mutex);
@@ -1107,7 +1133,7 @@ static void rebuild_root_domains(void)
rcu_read_unlock();
- update_tasks_root_domain(cs);
+ dl_update_tasks_root_domain(cs);
rcu_read_lock();
css_put(&cs->css);
@@ -1121,7 +1147,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
{
mutex_lock(&sched_domains_mutex);
partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
- rebuild_root_domains();
+ dl_rebuild_rd_accounting();
mutex_unlock(&sched_domains_mutex);
}
@@ -1134,7 +1160,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
* 'cpus' is removed, then call this routine to rebuild the
* scheduler's dynamic sched domains.
*
- * Call with cpuset_rwsem held. Takes cpus_read_lock().
+ * Call with cpuset_mutex held. Takes cpus_read_lock().
*/
static void rebuild_sched_domains_locked(void)
{
@@ -1145,7 +1171,7 @@ static void rebuild_sched_domains_locked(void)
int ndoms;
lockdep_assert_cpus_held();
- percpu_rwsem_assert_held(&cpuset_rwsem);
+ lockdep_assert_held(&cpuset_mutex);
/*
* If we have raced with CPU hotplug, return early to avoid
@@ -1196,9 +1222,9 @@ static void rebuild_sched_domains_locked(void)
void rebuild_sched_domains(void)
{
cpus_read_lock();
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
rebuild_sched_domains_locked();
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
}
@@ -1208,7 +1234,7 @@ void rebuild_sched_domains(void)
* @new_cpus: the temp variable for the new effective_cpus mask
*
* Iterate through each task of @cs updating its cpus_allowed to the
- * effective cpuset's. As this function is called with cpuset_rwsem held,
+ * effective cpuset's. As this function is called with cpuset_mutex held,
* cpuset membership stays stable. For top_cpuset, task_cpu_possible_mask()
* is used instead of effective_cpus to make sure all offline CPUs are also
* included as hotplug code won't update cpumasks for tasks in top_cpuset.
@@ -1322,7 +1348,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
int old_prs, new_prs;
int part_error = PERR_NONE; /* Partition error? */
- percpu_rwsem_assert_held(&cpuset_rwsem);
+ lockdep_assert_held(&cpuset_mutex);
/*
* The parent must be a partition root.
@@ -1545,7 +1571,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
*
* On legacy hierarchy, effective_cpus will be the same with cpu_allowed.
*
- * Called with cpuset_rwsem held
+ * Called with cpuset_mutex held
*/
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
bool force)
@@ -1705,7 +1731,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
struct cpuset *sibling;
struct cgroup_subsys_state *pos_css;
- percpu_rwsem_assert_held(&cpuset_rwsem);
+ lockdep_assert_held(&cpuset_mutex);
/*
* Check all its siblings and call update_cpumasks_hier()
@@ -1955,12 +1981,12 @@ static void *cpuset_being_rebound;
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
*
* Iterate through each task of @cs updating its mems_allowed to the
- * effective cpuset's. As this function is called with cpuset_rwsem held,
+ * effective cpuset's. As this function is called with cpuset_mutex held,
* cpuset membership stays stable.
*/
static void update_tasks_nodemask(struct cpuset *cs)
{
- static nodemask_t newmems; /* protected by cpuset_rwsem */
+ static nodemask_t newmems; /* protected by cpuset_mutex */
struct css_task_iter it;
struct task_struct *task;
@@ -1973,7 +1999,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
* take while holding tasklist_lock. Forks can happen - the
* mpol_dup() cpuset_being_rebound check will catch such forks,
* and rebind their vma mempolicies too. Because we still hold
- * the global cpuset_rwsem, we know that no other rebind effort
+ * the global cpuset_mutex, we know that no other rebind effort
* will be contending for the global variable cpuset_being_rebound.
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
* is idempotent. Also migrate pages in each mm to new nodes.
@@ -2019,7 +2045,7 @@ static void update_tasks_nodemask(struct cpuset *cs)
*
* On legacy hierarchy, effective_mems will be the same with mems_allowed.
*
- * Called with cpuset_rwsem held
+ * Called with cpuset_mutex held
*/
static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
{
@@ -2072,7 +2098,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
* mempolicies and if the cpuset is marked 'memory_migrate',
* migrate the tasks pages to the new memory.
*
- * Call with cpuset_rwsem held. May take callback_lock during call.
+ * Call with cpuset_mutex held. May take callback_lock during call.
* Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
* lock each such tasks mm->mmap_lock, scan its vma's and rebind
* their mempolicies to the cpusets new mems_allowed.
@@ -2164,7 +2190,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
* @cs: the cpuset in which each task's spread flags needs to be changed
*
* Iterate through each task of @cs updating its spread flags. As this
- * function is called with cpuset_rwsem held, cpuset membership stays
+ * function is called with cpuset_mutex held, cpuset membership stays
* stable.
*/
static void update_tasks_flags(struct cpuset *cs)
@@ -2184,7 +2210,7 @@ static void update_tasks_flags(struct cpuset *cs)
* cs: the cpuset to update
* turning_on: whether the flag is being set or cleared
*
- * Call with cpuset_rwsem held.
+ * Call with cpuset_mutex held.
*/
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
@@ -2234,7 +2260,7 @@ out:
* @new_prs: new partition root state
* Return: 0 if successful, != 0 if error
*
- * Call with cpuset_rwsem held.
+ * Call with cpuset_mutex held.
*/
static int update_prstate(struct cpuset *cs, int new_prs)
{
@@ -2472,19 +2498,26 @@ static int cpuset_can_attach_check(struct cpuset *cs)
return 0;
}
-/* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
+static void reset_migrate_dl_data(struct cpuset *cs)
+{
+ cs->nr_migrate_dl_tasks = 0;
+ cs->sum_migrate_dl_bw = 0;
+}
+
+/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
static int cpuset_can_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
- struct cpuset *cs;
+ struct cpuset *cs, *oldcs;
struct task_struct *task;
int ret;
/* used later by cpuset_attach() */
cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
+ oldcs = cpuset_attach_old_cs;
cs = css_cs(css);
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
/* Check to see if task is allowed in the cpuset */
ret = cpuset_can_attach_check(cs);
@@ -2492,21 +2525,46 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
goto out_unlock;
cgroup_taskset_for_each(task, css, tset) {
- ret = task_can_attach(task, cs->effective_cpus);
+ ret = task_can_attach(task);
if (ret)
goto out_unlock;
ret = security_task_setscheduler(task);
if (ret)
goto out_unlock;
+
+ if (dl_task(task)) {
+ cs->nr_migrate_dl_tasks++;
+ cs->sum_migrate_dl_bw += task->dl.dl_bw;
+ }
}
+ if (!cs->nr_migrate_dl_tasks)
+ goto out_success;
+
+ if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
+ int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
+
+ if (unlikely(cpu >= nr_cpu_ids)) {
+ reset_migrate_dl_data(cs);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
+ if (ret) {
+ reset_migrate_dl_data(cs);
+ goto out_unlock;
+ }
+ }
+
+out_success:
/*
* Mark attach is in progress. This makes validate_change() fail
* changes which zero cpus/mems_allowed.
*/
cs->attach_in_progress++;
out_unlock:
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
return ret;
}
@@ -2518,15 +2576,23 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
cgroup_taskset_first(tset, &css);
cs = css_cs(css);
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
cs->attach_in_progress--;
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
- percpu_up_write(&cpuset_rwsem);
+
+ if (cs->nr_migrate_dl_tasks) {
+ int cpu = cpumask_any(cs->effective_cpus);
+
+ dl_bw_free(cpu, cs->sum_migrate_dl_bw);
+ reset_migrate_dl_data(cs);
+ }
+
+ mutex_unlock(&cpuset_mutex);
}
/*
- * Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach_task()
+ * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
* but we can't allocate it dynamically there. Define it global and
* allocate from cpuset_init().
*/
@@ -2535,7 +2601,7 @@ static nodemask_t cpuset_attach_nodemask_to;
static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
{
- percpu_rwsem_assert_held(&cpuset_rwsem);
+ lockdep_assert_held(&cpuset_mutex);
if (cs != &top_cpuset)
guarantee_online_cpus(task, cpus_attach);
@@ -2565,7 +2631,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
cs = css_cs(css);
lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
cpus_updated = !cpumask_equal(cs->effective_cpus,
oldcs->effective_cpus);
mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
@@ -2622,11 +2688,17 @@ static void cpuset_attach(struct cgroup_taskset *tset)
out:
cs->old_mems_allowed = cpuset_attach_nodemask_to;
+ if (cs->nr_migrate_dl_tasks) {
+ cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
+ oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
+ reset_migrate_dl_data(cs);
+ }
+
cs->attach_in_progress--;
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
}
/* The various types of files and directories in a cpuset file system */
@@ -2658,7 +2730,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
int retval = 0;
cpus_read_lock();
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs)) {
retval = -ENODEV;
goto out_unlock;
@@ -2694,7 +2766,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
break;
}
out_unlock:
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
return retval;
}
@@ -2707,7 +2779,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
int retval = -ENODEV;
cpus_read_lock();
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs))
goto out_unlock;
@@ -2720,7 +2792,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
break;
}
out_unlock:
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
return retval;
}
@@ -2753,7 +2825,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
* operation like this one can lead to a deadlock through kernfs
* active_ref protection. Let's break the protection. Losing the
* protection is okay as we check whether @cs is online after
- * grabbing cpuset_rwsem anyway. This only happens on the legacy
+ * grabbing cpuset_mutex anyway. This only happens on the legacy
* hierarchies.
*/
css_get(&cs->css);
@@ -2761,7 +2833,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
flush_work(&cpuset_hotplug_work);
cpus_read_lock();
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs))
goto out_unlock;
@@ -2785,7 +2857,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
free_cpuset(trialcs);
out_unlock:
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
kernfs_unbreak_active_protection(of->kn);
css_put(&cs->css);
@@ -2933,13 +3005,13 @@ static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
css_get(&cs->css);
cpus_read_lock();
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
if (!is_cpuset_online(cs))
goto out_unlock;
retval = update_prstate(cs, val);
out_unlock:
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
css_put(&cs->css);
return retval ?: nbytes;
@@ -3156,7 +3228,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
return 0;
cpus_read_lock();
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
set_bit(CS_ONLINE, &cs->flags);
if (is_spread_page(parent))
@@ -3207,7 +3279,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
spin_unlock_irq(&callback_lock);
out_unlock:
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
return 0;
}
@@ -3228,7 +3300,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
struct cpuset *cs = css_cs(css);
cpus_read_lock();
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
if (is_partition_valid(cs))
update_prstate(cs, 0);
@@ -3247,7 +3319,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
cpuset_dec();
clear_bit(CS_ONLINE, &cs->flags);
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
}
@@ -3260,7 +3332,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
spin_lock_irq(&callback_lock);
if (is_in_v2_mode()) {
@@ -3273,7 +3345,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
}
spin_unlock_irq(&callback_lock);
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
}
/*
@@ -3294,14 +3366,14 @@ static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
return 0;
lockdep_assert_held(&cgroup_mutex);
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
/* Check to see if task is allowed in the cpuset */
ret = cpuset_can_attach_check(cs);
if (ret)
goto out_unlock;
- ret = task_can_attach(task, cs->effective_cpus);
+ ret = task_can_attach(task);
if (ret)
goto out_unlock;
@@ -3315,7 +3387,7 @@ static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
*/
cs->attach_in_progress++;
out_unlock:
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
return ret;
}
@@ -3331,11 +3403,11 @@ static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
if (same_cs)
return;
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
cs->attach_in_progress--;
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
}
/*
@@ -3363,7 +3435,7 @@ static void cpuset_fork(struct task_struct *task)
}
/* CLONE_INTO_CGROUP */
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
cpuset_attach_task(cs, task);
@@ -3371,7 +3443,7 @@ static void cpuset_fork(struct task_struct *task)
if (!cs->attach_in_progress)
wake_up(&cpuset_attach_wq);
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
}
struct cgroup_subsys cpuset_cgrp_subsys = {
@@ -3472,7 +3544,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
is_empty = cpumask_empty(cs->cpus_allowed) ||
nodes_empty(cs->mems_allowed);
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
/*
* Move tasks to the nearest ancestor with execution resources,
@@ -3482,7 +3554,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
if (is_empty)
remove_tasks_in_empty_cpuset(cs);
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
}
static void
@@ -3533,14 +3605,14 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
retry:
wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
/*
* We have raced with task attaching. We wait until attaching
* is finished, so we won't attach a task to an empty cpuset.
*/
if (cs->attach_in_progress) {
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
goto retry;
}
@@ -3637,7 +3709,7 @@ update_tasks:
cpus_updated, mems_updated);
unlock:
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
}
/**
@@ -3667,7 +3739,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
if (on_dfl && !alloc_cpumasks(NULL, &tmp))
ptmp = &tmp;
- percpu_down_write(&cpuset_rwsem);
+ mutex_lock(&cpuset_mutex);
/* fetch the available cpus/mems and find out which changed how */
cpumask_copy(&new_cpus, cpu_active_mask);
@@ -3724,7 +3796,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
update_tasks_nodemask(&top_cpuset);
}
- percpu_up_write(&cpuset_rwsem);
+ mutex_unlock(&cpuset_mutex);
/* if cpus or mems changed, we need to propagate to descendants */
if (cpus_updated || mems_updated) {
@@ -4155,7 +4227,7 @@ void __cpuset_memory_pressure_bump(void)
* - Used for /proc/<pid>/cpuset.
* - No need to task_lock(tsk) on this tsk->cpuset reference, as it
* doesn't really matter if tsk->cpuset changes after we read it,
- * and we take cpuset_rwsem, keeping cpuset_attach() from changing it
+ * and we take cpuset_mutex, keeping cpuset_attach() from changing it
* anyway.
*/
int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
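Taken together, the cpuset hunks above implement a three-phase handshake for migrating SCHED_DEADLINE tasks between cpusets. A condensed sketch using the same field names, with error paths trimmed and the _example suffix marking illustrative, non-kernel code:

/* Phase 1, ->can_attach(): reserve bandwidth on the destination root
 * domain when it differs from the source. */
static int can_attach_dl_example(struct cpuset *oldcs, struct cpuset *cs)
{
	if (!cs->nr_migrate_dl_tasks ||
	    cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus))
		return 0;

	return dl_bw_alloc(cpumask_any_and(cpu_active_mask, cs->effective_cpus),
			   cs->sum_migrate_dl_bw);
}

/* Phase 2, ->cancel_attach(): dl_bw_free() undoes the reservation.
 * Phase 3, ->attach(): the reservation is committed by moving
 * nr_deadline_tasks from the source cpuset to the destination. */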
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 9c4c55228567..2542c21b6b6d 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -171,7 +171,7 @@ __weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
__diag_pop();
/* see cgroup_rstat_flush() */
-static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
+static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
{
int cpu;
@@ -207,9 +207,8 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
}
raw_spin_unlock_irqrestore(cpu_lock, flags);
- /* if @may_sleep, play nice and yield if necessary */
- if (may_sleep && (need_resched() ||
- spin_needbreak(&cgroup_rstat_lock))) {
+ /* play nice and yield if necessary */
+ if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) {
spin_unlock_irq(&cgroup_rstat_lock);
if (!cond_resched())
cpu_relax();
@@ -236,26 +235,11 @@ __bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
might_sleep();
spin_lock_irq(&cgroup_rstat_lock);
- cgroup_rstat_flush_locked(cgrp, true);
+ cgroup_rstat_flush_locked(cgrp);
spin_unlock_irq(&cgroup_rstat_lock);
}
/**
- * cgroup_rstat_flush_atomic- atomic version of cgroup_rstat_flush()
- * @cgrp: target cgroup
- *
- * This function can be called from any context.
- */
-void cgroup_rstat_flush_atomic(struct cgroup *cgrp)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&cgroup_rstat_lock, flags);
- cgroup_rstat_flush_locked(cgrp, false);
- spin_unlock_irqrestore(&cgroup_rstat_lock, flags);
-}
-
-/**
* cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
* @cgrp: target cgroup
*
@@ -269,7 +253,7 @@ void cgroup_rstat_flush_hold(struct cgroup *cgrp)
{
might_sleep();
spin_lock_irq(&cgroup_rstat_lock);
- cgroup_rstat_flush_locked(cgrp, true);
+ cgroup_rstat_flush_locked(cgrp);
}
/**
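With cgroup_rstat_flush_atomic() gone, every flush path must be able to sleep: cgroup_rstat_flush_locked() may now drop the lock and reschedule unconditionally. A sketch of the two remaining entry points, assuming process context:

#include <linux/cgroup.h>

static void read_stats_example(struct cgroup *cgrp)
{
	/* One-shot flush; the lock is dropped again before returning. */
	cgroup_rstat_flush(cgrp);

	/* Flush and keep cgroup_rstat_lock held to read a consistent
	 * snapshot; must be paired with the release. */
	cgroup_rstat_flush_hold(cgrp);
	/* ... read cgrp's aggregated statistics ... */
	cgroup_rstat_flush_release();
}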
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 490792b1066e..08455fb48ba6 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -312,10 +312,10 @@ void __noreturn kthread_exit(long result)
* @comp: Completion to complete
* @code: The integer value to return to kthread_stop().
*
- * If present complete @comp and the reuturn code to kthread_stop().
+ * If present, complete @comp and then return @code to kthread_stop().
*
* A kernel thread whose module may be removed after the completion of
- * @comp can use this function exit safely.
+ * @comp can use this function to exit safely.
*
* Does not return.
*/
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index dcd1d5bfc1e0..4dfd2f3e09b2 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2263,6 +2263,9 @@ static inline bool usage_match(struct lock_list *entry, void *mask)
static inline bool usage_skip(struct lock_list *entry, void *mask)
{
+ if (entry->class->lock_type == LD_LOCK_NORMAL)
+ return false;
+
/*
* Skip local_lock() for irq inversion detection.
*
@@ -2289,14 +2292,16 @@ static inline bool usage_skip(struct lock_list *entry, void *mask)
* As a result, we will skip local_lock(), when we search for irq
* inversion bugs.
*/
- if (entry->class->lock_type == LD_LOCK_PERCPU) {
- if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
- return false;
+ if (entry->class->lock_type == LD_LOCK_PERCPU &&
+ DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
+ return false;
- return true;
- }
+ /*
+ * Skip WAIT_OVERRIDE for irq inversion detection -- it's not actually
+ * a lock and only used to override the wait_type.
+ */
- return false;
+ return true;
}
/*
@@ -4768,7 +4773,8 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
for (; depth < curr->lockdep_depth; depth++) {
struct held_lock *prev = curr->held_locks + depth;
- u8 prev_inner = hlock_class(prev)->wait_type_inner;
+ struct lock_class *class = hlock_class(prev);
+ u8 prev_inner = class->wait_type_inner;
if (prev_inner) {
/*
@@ -4778,6 +4784,14 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
* Also due to trylocks.
*/
curr_inner = min(curr_inner, prev_inner);
+
+ /*
+ * Allow override for annotations -- this is typically
+ * only valid/needed for code that only exists when
+ * CONFIG_PREEMPT_RT=n.
+ */
+ if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE))
+ curr_inner = prev_inner;
}
}
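The LD_LOCK_WAIT_OVERRIDE handling added here is consumed by annotations of the following shape (the debugobjects hunk later in this series is the first user); a sketch under that assumption:

#include <linux/lockdep.h>

/* Illustrative only: temporarily raise the allowed wait type so a
 * spinlock_t taken inside a raw_spinlock_t region does not trip
 * check_wait_context(), valid when the section is known to be
 * preemptible on !PREEMPT_RT kernels. */
static DEFINE_WAIT_OVERRIDE_MAP(fill_section_map, LD_WAIT_SLEEP);

static void wait_override_example(void)
{
	lock_map_acquire_try(&fill_section_map);
	/* ... hand-vetted lock nesting ... */
	lock_map_release(&fill_section_map);
}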
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 944c3ae39861..ed0d7381b2ec 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7590,6 +7590,7 @@ static int __sched_setscheduler(struct task_struct *p,
int reset_on_fork;
int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
struct rq *rq;
+ bool cpuset_locked = false;
/* The pi code expects interrupts enabled */
BUG_ON(pi && in_interrupt());
@@ -7639,8 +7640,14 @@ recheck:
return retval;
}
- if (pi)
- cpuset_read_lock();
+ /*
+ * SCHED_DEADLINE bandwidth accounting relies on stable cpusets
+ * information.
+ */
+ if (dl_policy(policy) || dl_policy(p->policy)) {
+ cpuset_locked = true;
+ cpuset_lock();
+ }
/*
* Make sure no PI-waiters arrive (or leave) while we are
@@ -7716,8 +7723,8 @@ change:
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
policy = oldpolicy = -1;
task_rq_unlock(rq, p, &rf);
- if (pi)
- cpuset_read_unlock();
+ if (cpuset_locked)
+ cpuset_unlock();
goto recheck;
}
@@ -7784,7 +7791,8 @@ change:
task_rq_unlock(rq, p, &rf);
if (pi) {
- cpuset_read_unlock();
+ if (cpuset_locked)
+ cpuset_unlock();
rt_mutex_adjust_pi(p);
}
@@ -7796,8 +7804,8 @@ change:
unlock:
task_rq_unlock(rq, p, &rf);
- if (pi)
- cpuset_read_unlock();
+ if (cpuset_locked)
+ cpuset_unlock();
return retval;
}
@@ -9286,8 +9294,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
return ret;
}
-int task_can_attach(struct task_struct *p,
- const struct cpumask *cs_effective_cpus)
+int task_can_attach(struct task_struct *p)
{
int ret = 0;
@@ -9300,21 +9307,9 @@ int task_can_attach(struct task_struct *p,
* success of set_cpus_allowed_ptr() on all attached tasks
* before cpus_mask may be changed.
*/
- if (p->flags & PF_NO_SETAFFINITY) {
+ if (p->flags & PF_NO_SETAFFINITY)
ret = -EINVAL;
- goto out;
- }
- if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
- cs_effective_cpus)) {
- int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus);
-
- if (unlikely(cpu >= nr_cpu_ids))
- return -EINVAL;
- ret = dl_cpu_busy(cpu, p);
- }
-
-out:
return ret;
}
@@ -9596,7 +9591,7 @@ static void cpuset_cpu_active(void)
static int cpuset_cpu_inactive(unsigned int cpu)
{
if (!cpuhp_tasks_frozen) {
- int ret = dl_cpu_busy(cpu, NULL);
+ int ret = dl_bw_check_overflow(cpu);
if (ret)
return ret;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 5a9a4b81c972..166c3e6eae61 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -16,6 +16,8 @@
* Fabio Checconi <fchecconi@gmail.com>
*/
+#include <linux/cpuset.h>
+
/*
* Default limits for DL period; on the top end we guard against small util
* tasks still getting ridiculously long effective runtimes, on the bottom end we
@@ -2596,6 +2598,12 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
if (task_on_rq_queued(p) && p->dl.dl_runtime)
task_non_contending(p);
+ /*
+ * In case a task is setscheduled out from SCHED_DEADLINE we need to
+ * keep track of that on its cpuset (for correct bandwidth tracking).
+ */
+ dec_dl_tasks_cs(p);
+
if (!task_on_rq_queued(p)) {
/*
* Inactive timer is armed. However, p is leaving DEADLINE and
@@ -2636,6 +2644,12 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
put_task_struct(p);
+ /*
+ * In case a task is setscheduled to SCHED_DEADLINE we need to keep
+ * track of that on its cpuset (for correct bandwidth tracking).
+ */
+ inc_dl_tasks_cs(p);
+
/* If p is not queued we will update its parameters at next wakeup. */
if (!task_on_rq_queued(p)) {
add_rq_bw(&p->dl, &rq->dl);
@@ -3044,26 +3058,38 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
return ret;
}
-int dl_cpu_busy(int cpu, struct task_struct *p)
+enum dl_bw_request {
+ dl_bw_req_check_overflow = 0,
+ dl_bw_req_alloc,
+ dl_bw_req_free
+};
+
+static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
{
- unsigned long flags, cap;
+ unsigned long flags;
struct dl_bw *dl_b;
- bool overflow;
+ bool overflow = false;
rcu_read_lock_sched();
dl_b = dl_bw_of(cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
- cap = dl_bw_capacity(cpu);
- overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0);
- if (!overflow && p) {
- /*
- * We reserve space for this task in the destination
- * root_domain, as we can't fail after this point.
- * We will free resources in the source root_domain
- * later on (see set_cpus_allowed_dl()).
- */
- __dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu));
+ if (req == dl_bw_req_free) {
+ __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
+ } else {
+ unsigned long cap = dl_bw_capacity(cpu);
+
+ overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
+
+ if (req == dl_bw_req_alloc && !overflow) {
+ /*
+ * We reserve space in the destination
+ * root_domain, as we can't fail after this point.
+ * We will free resources in the source root_domain
+ * later on (see set_cpus_allowed_dl()).
+ */
+ __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
+ }
}
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
@@ -3071,6 +3097,21 @@ int dl_cpu_busy(int cpu, struct task_struct *p)
return overflow ? -EBUSY : 0;
}
+
+int dl_bw_check_overflow(int cpu)
+{
+ return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
+}
+
+int dl_bw_alloc(int cpu, u64 dl_bw)
+{
+ return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
+}
+
+void dl_bw_free(int cpu, u64 dl_bw)
+{
+ dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
+}
#endif
#ifdef CONFIG_SCHED_DEBUG
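The dl_bw_manage() split yields a small reserve/commit/undo API. A hedged sketch of the caller contract, where do_migration_example() is a hypothetical stand-in for whatever step may still fail after the reservation:

static int do_migration_example(void)
{
	return 0;	/* hypothetical fallible step */
}

static int migrate_with_dl_bw_example(int cpu, u64 dl_bw)
{
	int ret = dl_bw_alloc(cpu, dl_bw);	/* reserve; -EBUSY on overflow */

	if (ret)
		return ret;

	ret = do_migration_example();
	if (ret)
		dl_bw_free(cpu, dl_bw);		/* undo the reservation */

	return ret;
}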
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ec7b3e0a2b20..0ad712811e35 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -330,7 +330,7 @@ extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
extern bool __checkparam_dl(const struct sched_attr *attr);
extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern int dl_cpu_busy(int cpu, struct task_struct *p);
+extern int dl_bw_check_overflow(int cpu);
#ifdef CONFIG_CGROUP_SCHED
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 860b2dcf3ac4..8b87a4dbd2d0 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -299,6 +299,7 @@ COND_SYSCALL(set_mempolicy);
COND_SYSCALL(migrate_pages);
COND_SYSCALL(move_pages);
COND_SYSCALL(set_mempolicy_home_node);
+COND_SYSCALL(cachestat);
COND_SYSCALL(perf_event_open);
COND_SYSCALL(accept4);
@@ -381,6 +382,7 @@ COND_SYSCALL(vm86old);
COND_SYSCALL(modify_ldt);
COND_SYSCALL(vm86);
COND_SYSCALL(kexec_file_load);
+COND_SYSCALL(map_shadow_stack);
/* s390 */
COND_SYSCALL(s390_pci_mmio_read);
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 9a050e36dc6c..2bc41e6ac9fe 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1349,9 +1349,9 @@ __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
}
return verify_pkcs7_signature(data_ptr->data,
- bpf_dynptr_get_size(data_ptr),
+ __bpf_dynptr_size(data_ptr),
sig_ptr->data,
- bpf_dynptr_get_size(sig_ptr),
+ __bpf_dynptr_size(sig_ptr),
trusted_keyring->key,
VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
NULL);
diff --git a/kernel/ucount.c b/kernel/ucount.c
index ee8e57fd6f90..2c04589a61ff 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -212,18 +212,16 @@ void put_ucounts(struct ucounts *ucounts)
}
}
-static inline bool atomic_long_inc_below(atomic_long_t *v, int u)
+static inline bool atomic_long_inc_below(atomic_long_t *v, long u)
{
- long c, old;
- c = atomic_long_read(v);
- for (;;) {
+ long c = atomic_long_read(v);
+
+ do {
if (unlikely(c >= u))
return false;
- old = atomic_long_cmpxchg(v, c, c+1);
- if (likely(old == c))
- return true;
- c = old;
- }
+ } while (!atomic_long_try_cmpxchg(v, &c, c+1));
+
+ return true;
}
struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid,
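The conversion above relies on atomic_long_try_cmpxchg() refreshing its expected-value argument with the current value on failure, so the explicit re-read of the old loop disappears. A standalone demonstration of the same idiom, illustrative only:

#include <linux/atomic.h>

static bool inc_below_example(atomic_long_t *v, long limit)
{
	long c = atomic_long_read(v);

	do {
		if (c >= limit)
			return false;
		/* on failure, 'c' is updated to the current value of *v */
	} while (!atomic_long_try_cmpxchg(v, &c, c + 1));

	return true;
}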
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4666a1a92a31..74ad7d833a04 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5012,6 +5012,8 @@ void show_one_workqueue(struct workqueue_struct *wq)
}
}
+/* Temporary export for flush_scheduled_work(). */
+EXPORT_SYMBOL(show_one_workqueue);
/**
* show_one_worker_pool - dump state of specified worker pool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ce51d4dc6803..f202648dead9 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2302,9 +2302,13 @@ config TEST_XARRAY
tristate "Test the XArray code at runtime"
config TEST_MAPLE_TREE
- depends on DEBUG_KERNEL
- select DEBUG_MAPLE_TREE
- tristate "Test the Maple Tree code at runtime"
+ tristate "Test the Maple Tree code at runtime or module load"
+ help
+ Enable this option to test the maple tree code functions at boot, or
+ when the module is loaded. Enabling "Debug Maple Trees" will give
+ more verbose output on failures.
+
+ If unsure, say N.
config TEST_RHASHTABLE
tristate "Perform selftest on resizable hash table"
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 003edc5ebd67..826c617b10a7 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -591,10 +591,21 @@ static void debug_objects_fill_pool(void)
{
/*
* On RT enabled kernels the pool refill must happen in preemptible
- * context:
+ * context -- for !RT kernels we rely on the fact that spinlock_t and
+ * raw_spinlock_t are basically the same type and this lock-type
+ * inversion works just fine.
*/
- if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
+ /*
+ * Annotate away the spinlock_t inside raw_spinlock_t warning
+ * by temporarily raising the wait-type to WAIT_SLEEP, matching
+ * the preemptible() condition above.
+ */
+ static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
+ lock_map_acquire_try(&fill_pool_map);
fill_pool();
+ lock_map_release(&fill_pool_map);
+ }
}
static void
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 960223ed9199..53840e75792d 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -732,8 +732,6 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
return 0;
if (WARN_ON_ONCE(i->data_source))
return 0;
- if (unlikely(iov_iter_is_pipe(i)))
- return copy_page_to_iter_pipe(page, offset, bytes, i);
page += offset / PAGE_SIZE; // first subpage
offset %= PAGE_SIZE;
while (1) {
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 110a36479dce..b37065a6f570 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -194,7 +194,7 @@ static void mas_set_height(struct ma_state *mas)
unsigned int new_flags = mas->tree->ma_flags;
new_flags &= ~MT_FLAGS_HEIGHT_MASK;
- BUG_ON(mas->depth > MAPLE_HEIGHT_MAX);
+ MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
mas->tree->ma_flags = new_flags;
}
@@ -240,12 +240,12 @@ static inline void mas_set_err(struct ma_state *mas, long err)
mas->node = MA_ERROR(err);
}
-static inline bool mas_is_ptr(struct ma_state *mas)
+static inline bool mas_is_ptr(const struct ma_state *mas)
{
return mas->node == MAS_ROOT;
}
-static inline bool mas_is_start(struct ma_state *mas)
+static inline bool mas_is_start(const struct ma_state *mas)
{
return mas->node == MAS_START;
}
@@ -425,28 +425,26 @@ static inline unsigned long mte_parent_slot_mask(unsigned long parent)
}
/*
- * mas_parent_enum() - Return the maple_type of the parent from the stored
+ * mas_parent_type() - Return the maple_type of the parent from the stored
* parent type.
* @mas: The maple state
- * @node: The maple_enode to extract the parent's enum
+ * @enode: The maple_enode to extract the parent's enum
* Return: The node->parent maple_type
*/
static inline
-enum maple_type mte_parent_enum(struct maple_enode *p_enode,
- struct maple_tree *mt)
+enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
{
unsigned long p_type;
- p_type = (unsigned long)p_enode;
- if (p_type & MAPLE_PARENT_ROOT)
- return 0; /* Validated in the caller. */
+ p_type = (unsigned long)mte_to_node(enode)->parent;
+ if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
+ return 0;
p_type &= MAPLE_NODE_MASK;
- p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type));
-
+ p_type &= ~mte_parent_slot_mask(p_type);
switch (p_type) {
case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
- if (mt_is_alloc(mt))
+ if (mt_is_alloc(mas->tree))
return maple_arange_64;
return maple_range_64;
}
@@ -454,14 +452,8 @@ enum maple_type mte_parent_enum(struct maple_enode *p_enode,
return 0;
}
-static inline
-enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode)
-{
- return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree);
-}
-
/*
- * mte_set_parent() - Set the parent node and encode the slot
+ * mas_set_parent() - Set the parent node and encode the slot
* @enode: The encoded maple node.
* @parent: The encoded maple node that is the parent of @enode.
* @slot: The slot that @enode resides in @parent.
@@ -470,16 +462,16 @@ enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode)
* parent type.
*/
static inline
-void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent,
- unsigned char slot)
+void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
+ const struct maple_enode *parent, unsigned char slot)
{
unsigned long val = (unsigned long)parent;
unsigned long shift;
unsigned long type;
enum maple_type p_type = mte_node_type(parent);
- BUG_ON(p_type == maple_dense);
- BUG_ON(p_type == maple_leaf_64);
+ MAS_BUG_ON(mas, p_type == maple_dense);
+ MAS_BUG_ON(mas, p_type == maple_leaf_64);
switch (p_type) {
case maple_range_64:
@@ -671,22 +663,22 @@ static inline unsigned long *ma_gaps(struct maple_node *node,
}
/*
- * mte_pivot() - Get the pivot at @piv of the maple encoded node.
- * @mn: The maple encoded node.
+ * mas_pivot() - Get the pivot at @piv of the maple encoded node.
+ * @mas: The maple state.
* @piv: The pivot.
*
* Return: the pivot at @piv of @mas->node.
*/
-static inline unsigned long mte_pivot(const struct maple_enode *mn,
- unsigned char piv)
+static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
{
- struct maple_node *node = mte_to_node(mn);
- enum maple_type type = mte_node_type(mn);
+ struct maple_node *node = mas_mn(mas);
+ enum maple_type type = mte_node_type(mas->node);
- if (piv >= mt_pivots[type]) {
- WARN_ON(1);
+ if (MAS_WARN_ON(mas, piv >= mt_pivots[type])) {
+ mas_set_err(mas, -EIO);
return 0;
}
+
switch (type) {
case maple_arange_64:
return node->ma64.pivot[piv];
@@ -971,8 +963,6 @@ static inline unsigned char ma_meta_end(struct maple_node *mn,
static inline unsigned char ma_meta_gap(struct maple_node *mn,
enum maple_type mt)
{
- BUG_ON(mt != maple_arange_64);
-
return mn->ma64.meta.gap;
}
@@ -1111,7 +1101,6 @@ static int mas_ascend(struct ma_state *mas)
enum maple_type a_type;
unsigned long min, max;
unsigned long *pivots;
- unsigned char offset;
bool set_max = false, set_min = false;
a_node = mas_mn(mas);
@@ -1123,8 +1112,9 @@ static int mas_ascend(struct ma_state *mas)
p_node = mte_parent(mas->node);
if (unlikely(a_node == p_node))
return 1;
- a_type = mas_parent_enum(mas, mas->node);
- offset = mte_parent_slot(mas->node);
+
+ a_type = mas_parent_type(mas, mas->node);
+ mas->offset = mte_parent_slot(mas->node);
a_enode = mt_mk_node(p_node, a_type);
/* Check to make sure all parent information is still accurate */
@@ -1132,7 +1122,6 @@ static int mas_ascend(struct ma_state *mas)
return 1;
mas->node = a_enode;
- mas->offset = offset;
if (mte_is_root(a_enode)) {
mas->max = ULONG_MAX;
@@ -1140,11 +1129,17 @@ static int mas_ascend(struct ma_state *mas)
return 0;
}
+ if (!mas->min)
+ set_min = true;
+
+ if (mas->max == ULONG_MAX)
+ set_max = true;
+
min = 0;
max = ULONG_MAX;
do {
p_enode = a_enode;
- a_type = mas_parent_enum(mas, p_enode);
+ a_type = mas_parent_type(mas, p_enode);
a_node = mte_parent(p_enode);
a_slot = mte_parent_slot(p_enode);
a_enode = mt_mk_node(a_node, a_type);
@@ -1401,9 +1396,9 @@ static inline struct maple_enode *mas_start(struct ma_state *mas)
mas->min = 0;
mas->max = ULONG_MAX;
- mas->depth = 0;
retry:
+ mas->depth = 0;
root = mas_root(mas);
/* Tree with nodes */
if (likely(xa_is_node(root))) {
@@ -1631,6 +1626,7 @@ static inline unsigned long mas_max_gap(struct ma_state *mas)
return mas_leaf_max_gap(mas);
node = mas_mn(mas);
+ MAS_BUG_ON(mas, mt != maple_arange_64);
offset = ma_meta_gap(node, mt);
if (offset == MAPLE_ARANGE64_META_MAX)
return 0;
@@ -1659,11 +1655,12 @@ static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
enum maple_type pmt;
pnode = mte_parent(mas->node);
- pmt = mas_parent_enum(mas, mas->node);
+ pmt = mas_parent_type(mas, mas->node);
penode = mt_mk_node(pnode, pmt);
pgaps = ma_gaps(pnode, pmt);
ascend:
+ MAS_BUG_ON(mas, pmt != maple_arange_64);
meta_offset = ma_meta_gap(pnode, pmt);
if (meta_offset == MAPLE_ARANGE64_META_MAX)
meta_gap = 0;
@@ -1691,7 +1688,7 @@ ascend:
/* Go to the parent node. */
pnode = mte_parent(penode);
- pmt = mas_parent_enum(mas, penode);
+ pmt = mas_parent_type(mas, penode);
pgaps = ma_gaps(pnode, pmt);
offset = mte_parent_slot(penode);
penode = mt_mk_node(pnode, pmt);
@@ -1718,7 +1715,7 @@ static inline void mas_update_gap(struct ma_state *mas)
pslot = mte_parent_slot(mas->node);
p_gap = ma_gaps(mte_parent(mas->node),
- mas_parent_enum(mas, mas->node))[pslot];
+ mas_parent_type(mas, mas->node))[pslot];
if (p_gap != max_gap)
mas_parent_gap(mas, pslot, max_gap);
@@ -1743,7 +1740,7 @@ static inline void mas_adopt_children(struct ma_state *mas,
offset = ma_data_end(node, type, pivots, mas->max);
do {
child = mas_slot_locked(mas, slots, offset);
- mte_set_parent(child, parent, offset);
+ mas_set_parent(mas, child, parent, offset);
} while (offset--);
}
@@ -1767,7 +1764,7 @@ static inline void mas_replace(struct ma_state *mas, bool advanced)
} else {
offset = mte_parent_slot(mas->node);
slots = ma_slots(mte_parent(mas->node),
- mas_parent_enum(mas, mas->node));
+ mas_parent_type(mas, mas->node));
old_enode = mas_slot_locked(mas, slots, offset);
}
@@ -1943,8 +1940,9 @@ static inline int mab_calc_split(struct ma_state *mas,
* causes one node to be deficient.
* NOTE: mt_min_slots is 1 based, b_end and split are zero.
*/
- while (((bn->pivot[split] - min) < slot_count - 1) &&
- (split < slot_count - 1) && (b_end - split > slot_min))
+ while ((split < slot_count - 1) &&
+ ((bn->pivot[split] - min) < slot_count - 1) &&
+ (b_end - split > slot_min))
split++;
}
@@ -2347,7 +2345,8 @@ static inline void mas_topiary_range(struct ma_state *mas,
void __rcu **slots;
unsigned char offset;
- MT_BUG_ON(mas->tree, mte_is_leaf(mas->node));
+ MAS_BUG_ON(mas, mte_is_leaf(mas->node));
+
slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
for (offset = start; offset <= end; offset++) {
struct maple_enode *enode = mas_slot_locked(mas, slots, offset);
@@ -2707,9 +2706,9 @@ static inline void mas_set_split_parent(struct ma_state *mas,
return;
if ((*slot) <= split)
- mte_set_parent(mas->node, left, *slot);
+ mas_set_parent(mas, mas->node, left, *slot);
else if (right)
- mte_set_parent(mas->node, right, (*slot) - split - 1);
+ mas_set_parent(mas, mas->node, right, (*slot) - split - 1);
(*slot)++;
}
@@ -3106,12 +3105,12 @@ static int mas_spanning_rebalance(struct ma_state *mas,
mte_node_type(mast->orig_l->node));
mast->orig_l->depth++;
mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
- mte_set_parent(left, l_mas.node, slot);
+ mas_set_parent(mas, left, l_mas.node, slot);
if (middle)
- mte_set_parent(middle, l_mas.node, ++slot);
+ mas_set_parent(mas, middle, l_mas.node, ++slot);
if (right)
- mte_set_parent(right, l_mas.node, ++slot);
+ mas_set_parent(mas, right, l_mas.node, ++slot);
if (mas_is_root_limits(mast->l)) {
new_root:
@@ -3250,7 +3249,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end
l_mas.max = l_pivs[split];
mas->min = l_mas.max + 1;
eparent = mt_mk_node(mte_parent(l_mas.node),
- mas_parent_enum(&l_mas, l_mas.node));
+ mas_parent_type(&l_mas, l_mas.node));
tmp += end;
if (!in_rcu) {
unsigned char max_p = mt_pivots[mt];
@@ -3293,7 +3292,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end
/* replace parent. */
offset = mte_parent_slot(mas->node);
- mt = mas_parent_enum(&l_mas, l_mas.node);
+ mt = mas_parent_type(&l_mas, l_mas.node);
parent = mas_pop_node(mas);
slots = ma_slots(parent, mt);
pivs = ma_pivots(parent, mt);
@@ -3338,8 +3337,8 @@ static inline bool mas_split_final_node(struct maple_subtree_state *mast,
* The Big_node data should just fit in a single node.
*/
ancestor = mas_new_ma_node(mas, mast->bn);
- mte_set_parent(mast->l->node, ancestor, mast->l->offset);
- mte_set_parent(mast->r->node, ancestor, mast->r->offset);
+ mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
+ mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
mast->l->node = ancestor;
@@ -4263,11 +4262,13 @@ done:
static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
{
- while ((wr_mas->mas->last > wr_mas->end_piv) &&
- (wr_mas->offset_end < wr_mas->node_end))
- wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end];
+ while ((wr_mas->offset_end < wr_mas->node_end) &&
+ (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
+ wr_mas->offset_end++;
- if (wr_mas->mas->last > wr_mas->end_piv)
+ if (wr_mas->offset_end < wr_mas->node_end)
+ wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
+ else
wr_mas->end_piv = wr_mas->mas->max;
}
@@ -4424,7 +4425,6 @@ static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
}
/* At this point, we are at the leaf node that needs to be altered. */
- wr_mas->end_piv = wr_mas->r_max;
mas_wr_end_piv(wr_mas);
if (!wr_mas->entry)
@@ -4498,6 +4498,25 @@ exists:
}
+static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
+{
+retry:
+ mas_set(mas, index);
+ mas_state_walk(mas);
+ if (mas_is_start(mas))
+ goto retry;
+}
+
+static inline bool mas_rewalk_if_dead(struct ma_state *mas,
+ struct maple_node *node, const unsigned long index)
+{
+ if (unlikely(ma_dead_node(node))) {
+ mas_rewalk(mas, index);
+ return true;
+ }
+ return false;
+}
+
/*
* mas_prev_node() - Find the prev non-null entry at the same level in the
* tree. The prev value will be mas->node[mas->offset] or MAS_NONE.
@@ -4513,15 +4532,19 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
int offset, level;
void __rcu **slots;
struct maple_node *node;
- struct maple_enode *enode;
unsigned long *pivots;
+ unsigned long max;
- if (mas_is_none(mas))
- return 0;
+ node = mas_mn(mas);
+ if (!mas->min)
+ goto no_entry;
+
+ max = mas->min - 1;
+ if (max < min)
+ goto no_entry;
level = 0;
do {
- node = mas_mn(mas);
if (ma_is_root(node))
goto no_entry;
@@ -4530,64 +4553,41 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
return 1;
offset = mas->offset;
level++;
+ node = mas_mn(mas);
} while (!offset);
offset--;
mt = mte_node_type(mas->node);
- node = mas_mn(mas);
- slots = ma_slots(node, mt);
- pivots = ma_pivots(node, mt);
- if (unlikely(ma_dead_node(node)))
- return 1;
-
- mas->max = pivots[offset];
- if (offset)
- mas->min = pivots[offset - 1] + 1;
- if (unlikely(ma_dead_node(node)))
- return 1;
-
- if (mas->max < min)
- goto no_entry_min;
-
while (level > 1) {
level--;
- enode = mas_slot(mas, slots, offset);
+ slots = ma_slots(node, mt);
+ mas->node = mas_slot(mas, slots, offset);
if (unlikely(ma_dead_node(node)))
return 1;
- mas->node = enode;
mt = mte_node_type(mas->node);
node = mas_mn(mas);
- slots = ma_slots(node, mt);
pivots = ma_pivots(node, mt);
- offset = ma_data_end(node, mt, pivots, mas->max);
+ offset = ma_data_end(node, mt, pivots, max);
if (unlikely(ma_dead_node(node)))
return 1;
-
- if (offset)
- mas->min = pivots[offset - 1] + 1;
-
- if (offset < mt_pivots[mt])
- mas->max = pivots[offset];
-
- if (mas->max < min)
- goto no_entry;
}
+ slots = ma_slots(node, mt);
mas->node = mas_slot(mas, slots, offset);
+ pivots = ma_pivots(node, mt);
if (unlikely(ma_dead_node(node)))
return 1;
+ if (likely(offset))
+ mas->min = pivots[offset - 1] + 1;
+ mas->max = max;
mas->offset = mas_data_end(mas);
if (unlikely(mte_dead_node(mas->node)))
return 1;
return 0;
-no_entry_min:
- mas->offset = offset;
- if (offset)
- mas->min = pivots[offset - 1] + 1;
no_entry:
if (unlikely(ma_dead_node(node)))
return 1;
@@ -4597,6 +4597,76 @@ no_entry:
}
/*
+ * mas_prev_slot() - Get the entry in the previous slot
+ *
+ * @mas: The maple state
+ * @min: The minimum starting range
+ * @empty: Can be empty
+ *
+ * Return: The entry in the previous slot which is possibly NULL
+ */
+void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
+{
+ void *entry;
+ void __rcu **slots;
+ unsigned long pivot;
+ enum maple_type type;
+ unsigned long *pivots;
+ struct maple_node *node;
+ unsigned long save_point = mas->index;
+
+retry:
+ node = mas_mn(mas);
+ type = mte_node_type(mas->node);
+ pivots = ma_pivots(node, type);
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
+
+again:
+ if (mas->min <= min) {
+ pivot = mas_safe_min(mas, pivots, mas->offset);
+
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
+
+ if (pivot <= min)
+ return NULL;
+ }
+
+ if (likely(mas->offset)) {
+ mas->offset--;
+ mas->last = mas->index - 1;
+ mas->index = mas_safe_min(mas, pivots, mas->offset);
+ } else {
+ if (mas_prev_node(mas, min)) {
+ mas_rewalk(mas, save_point);
+ goto retry;
+ }
+
+ if (mas_is_none(mas))
+ return NULL;
+
+ mas->last = mas->max;
+ node = mas_mn(mas);
+ type = mte_node_type(mas->node);
+ pivots = ma_pivots(node, type);
+ mas->index = pivots[mas->offset - 1] + 1;
+ }
+
+ slots = ma_slots(node, type);
+ entry = mas_slot(mas, slots, mas->offset);
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
+
+ if (likely(entry))
+ return entry;
+
+ if (!empty)
+ goto again;
+
+ return entry;
+}
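
Usage sketch for the new reverse slot iterator; it assumes rcu_read_lock() is held and the state has already been positioned on a leaf by mas_walk():

    MA_STATE(mas, &tree, 42, 42);          /* 'tree' is an assumed maple tree */
    void *entry;

    rcu_read_lock();
    mas_walk(&mas);                        /* position on the leaf */
    entry = mas_prev_slot(&mas, 0, false); /* false: skip empty ranges */
    /* On success mas.index/mas.last describe the returned range. */
    rcu_read_unlock();
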
+
+/*
* mas_next_node() - Get the next node at the same level in the tree.
* @mas: The maple state
* @max: The maximum pivot value to check.
@@ -4607,11 +4677,10 @@ no_entry:
static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
unsigned long max)
{
- unsigned long min, pivot;
+ unsigned long min;
unsigned long *pivots;
struct maple_enode *enode;
int level = 0;
- unsigned char offset;
unsigned char node_end;
enum maple_type mt;
void __rcu **slots;
@@ -4619,19 +4688,16 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
if (mas->max >= max)
goto no_entry;
+ min = mas->max + 1;
level = 0;
do {
if (ma_is_root(node))
goto no_entry;
- min = mas->max + 1;
- if (min > max)
- goto no_entry;
-
+ /* Walk up. */
if (unlikely(mas_ascend(mas)))
return 1;
- offset = mas->offset;
level++;
node = mas_mn(mas);
mt = mte_node_type(mas->node);
@@ -4640,36 +4706,37 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
if (unlikely(ma_dead_node(node)))
return 1;
- } while (unlikely(offset == node_end));
+ } while (unlikely(mas->offset == node_end));
slots = ma_slots(node, mt);
- pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
- while (unlikely(level > 1)) {
- /* Descend, if necessary */
- enode = mas_slot(mas, slots, offset);
- if (unlikely(ma_dead_node(node)))
- return 1;
+ mas->offset++;
+ enode = mas_slot(mas, slots, mas->offset);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
- mas->node = enode;
+ if (level > 1)
+ mas->offset = 0;
+
+ while (unlikely(level > 1)) {
level--;
+ mas->node = enode;
node = mas_mn(mas);
mt = mte_node_type(mas->node);
slots = ma_slots(node, mt);
- pivots = ma_pivots(node, mt);
+ enode = mas_slot(mas, slots, 0);
if (unlikely(ma_dead_node(node)))
return 1;
-
- offset = 0;
- pivot = pivots[0];
}
- enode = mas_slot(mas, slots, offset);
+ if (!mas->offset)
+ pivots = ma_pivots(node, mt);
+
+ mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
if (unlikely(ma_dead_node(node)))
return 1;
mas->node = enode;
mas->min = min;
- mas->max = pivot;
return 0;
no_entry:
@@ -4681,92 +4748,88 @@ no_entry:
}
/*
- * mas_next_nentry() - Get the next node entry
- * @mas: The maple state
- * @max: The maximum value to check
- * @*range_start: Pointer to store the start of the range.
+ * mas_next_slot() - Get the entry in the next slot
*
- * Sets @mas->offset to the offset of the next node entry, @mas->last to the
- * pivot of the entry.
+ * @mas: The maple state
+ * @max: The maximum starting range
+ * @empty: Can be empty
*
- * Return: The next entry, %NULL otherwise
+ * Return: The entry in the next slot which is possibly NULL
*/
-static inline void *mas_next_nentry(struct ma_state *mas,
- struct maple_node *node, unsigned long max, enum maple_type type)
+void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
{
- unsigned char count;
- unsigned long pivot;
- unsigned long *pivots;
void __rcu **slots;
+ unsigned long *pivots;
+ unsigned long pivot;
+ enum maple_type type;
+ struct maple_node *node;
+ unsigned char data_end;
+ unsigned long save_point = mas->last;
void *entry;
- if (mas->last == mas->max) {
- mas->index = mas->max;
- return NULL;
- }
-
- slots = ma_slots(node, type);
+retry:
+ node = mas_mn(mas);
+ type = mte_node_type(mas->node);
pivots = ma_pivots(node, type);
- count = ma_data_end(node, type, pivots, mas->max);
- if (unlikely(ma_dead_node(node)))
- return NULL;
-
- mas->index = mas_safe_min(mas, pivots, mas->offset);
- if (unlikely(ma_dead_node(node)))
- return NULL;
-
- if (mas->index > max)
- return NULL;
-
- if (mas->offset > count)
- return NULL;
+ data_end = ma_data_end(node, type, pivots, mas->max);
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
- while (mas->offset < count) {
- pivot = pivots[mas->offset];
- entry = mas_slot(mas, slots, mas->offset);
- if (ma_dead_node(node))
- return NULL;
+again:
+ if (mas->max >= max) {
+ if (likely(mas->offset < data_end))
+ pivot = pivots[mas->offset];
+ else
+ return NULL; /* must be mas->max */
- if (entry)
- goto found;
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
if (pivot >= max)
return NULL;
+ }
- mas->index = pivot + 1;
+ if (likely(mas->offset < data_end)) {
+ mas->index = pivots[mas->offset] + 1;
mas->offset++;
- }
+ if (likely(mas->offset < data_end))
+ mas->last = pivots[mas->offset];
+ else
+ mas->last = mas->max;
+ } else {
+ if (mas_next_node(mas, node, max)) {
+ mas_rewalk(mas, save_point);
+ goto retry;
+ }
- if (mas->index > mas->max) {
- mas->index = mas->last;
- return NULL;
+ if (mas_is_none(mas))
+ return NULL;
+
+ mas->offset = 0;
+ mas->index = mas->min;
+ node = mas_mn(mas);
+ type = mte_node_type(mas->node);
+ pivots = ma_pivots(node, type);
+ mas->last = pivots[0];
}
- pivot = mas_safe_pivot(mas, pivots, mas->offset, type);
- entry = mas_slot(mas, slots, mas->offset);
- if (ma_dead_node(node))
- return NULL;
+ slots = ma_slots(node, type);
+ entry = mt_slot(mas->tree, slots, mas->offset);
+ if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
+ goto retry;
- if (!pivot)
- return NULL;
+ if (entry)
+ return entry;
- if (!entry)
- return NULL;
+ if (!empty) {
+ if (!mas->offset)
+ data_end = 2;
+ goto again;
+ }
-found:
- mas->last = pivot;
return entry;
}
-static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
-{
-retry:
- mas_set(mas, index);
- mas_state_walk(mas);
- if (mas_is_start(mas))
- goto retry;
-}
-
/*
* mas_next_entry() - Internal function to get the next entry.
* @mas: The maple state
@@ -4781,155 +4844,10 @@ retry:
*/
static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
{
- void *entry = NULL;
- struct maple_enode *prev_node;
- struct maple_node *node;
- unsigned char offset;
- unsigned long last;
- enum maple_type mt;
-
- if (mas->index > limit) {
- mas->index = mas->last = limit;
- mas_pause(mas);
+ if (mas->last >= limit)
return NULL;
- }
- last = mas->last;
-retry:
- offset = mas->offset;
- prev_node = mas->node;
- node = mas_mn(mas);
- mt = mte_node_type(mas->node);
- mas->offset++;
- if (unlikely(mas->offset >= mt_slots[mt])) {
- mas->offset = mt_slots[mt] - 1;
- goto next_node;
- }
-
- while (!mas_is_none(mas)) {
- entry = mas_next_nentry(mas, node, limit, mt);
- if (unlikely(ma_dead_node(node))) {
- mas_rewalk(mas, last);
- goto retry;
- }
-
- if (likely(entry))
- return entry;
-
- if (unlikely((mas->index > limit)))
- break;
-next_node:
- prev_node = mas->node;
- offset = mas->offset;
- if (unlikely(mas_next_node(mas, node, limit))) {
- mas_rewalk(mas, last);
- goto retry;
- }
- mas->offset = 0;
- node = mas_mn(mas);
- mt = mte_node_type(mas->node);
- }
-
- mas->index = mas->last = limit;
- mas->offset = offset;
- mas->node = prev_node;
- return NULL;
-}
-
-/*
- * mas_prev_nentry() - Get the previous node entry.
- * @mas: The maple state.
- * @limit: The lower limit to check for a value.
- *
- * Return: the entry, %NULL otherwise.
- */
-static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit,
- unsigned long index)
-{
- unsigned long pivot, min;
- unsigned char offset;
- struct maple_node *mn;
- enum maple_type mt;
- unsigned long *pivots;
- void __rcu **slots;
- void *entry;
-
-retry:
- if (!mas->offset)
- return NULL;
-
- mn = mas_mn(mas);
- mt = mte_node_type(mas->node);
- offset = mas->offset - 1;
- if (offset >= mt_slots[mt])
- offset = mt_slots[mt] - 1;
-
- slots = ma_slots(mn, mt);
- pivots = ma_pivots(mn, mt);
- if (unlikely(ma_dead_node(mn))) {
- mas_rewalk(mas, index);
- goto retry;
- }
-
- if (offset == mt_pivots[mt])
- pivot = mas->max;
- else
- pivot = pivots[offset];
-
- if (unlikely(ma_dead_node(mn))) {
- mas_rewalk(mas, index);
- goto retry;
- }
-
- while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) ||
- !pivot))
- pivot = pivots[--offset];
-
- min = mas_safe_min(mas, pivots, offset);
- entry = mas_slot(mas, slots, offset);
- if (unlikely(ma_dead_node(mn))) {
- mas_rewalk(mas, index);
- goto retry;
- }
-
- if (likely(entry)) {
- mas->offset = offset;
- mas->last = pivot;
- mas->index = min;
- }
- return entry;
-}
-
-static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
-{
- void *entry;
-
- if (mas->index < min) {
- mas->index = mas->last = min;
- mas->node = MAS_NONE;
- return NULL;
- }
-retry:
- while (likely(!mas_is_none(mas))) {
- entry = mas_prev_nentry(mas, min, mas->index);
- if (unlikely(mas->last < min))
- goto not_found;
-
- if (likely(entry))
- return entry;
-
- if (unlikely(mas_prev_node(mas, min))) {
- mas_rewalk(mas, mas->index);
- goto retry;
- }
-
- mas->offset++;
- }
-
- mas->offset--;
-not_found:
- mas->index = mas->last = min;
- return NULL;
+ return mas_next_slot(mas, limit, false);
}
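
With the slot iterators in place, mas_next_entry() reduces to the limit check above plus mas_next_slot(); external users such as mt_find() keep their contract (see the mt_find() hunk further below, where the loop condition switches to mas.last < max). A usage sketch:

    unsigned long index = 0;
    void *entry;

    entry = mt_find(&tree, &index, ULONG_MAX); /* 'tree' is assumed */
    /* On success, index has been advanced past the found range. */
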
/*
@@ -5105,24 +5023,25 @@ void *mas_walk(struct ma_state *mas)
{
void *entry;
+ if (mas_is_none(mas) || mas_is_paused(mas))
+ mas->node = MAS_START;
retry:
entry = mas_state_walk(mas);
- if (mas_is_start(mas))
+ if (mas_is_start(mas)) {
goto retry;
-
- if (mas_is_ptr(mas)) {
+ } else if (mas_is_none(mas)) {
+ mas->index = 0;
+ mas->last = ULONG_MAX;
+ } else if (mas_is_ptr(mas)) {
if (!mas->index) {
mas->last = 0;
- } else {
- mas->index = 1;
- mas->last = ULONG_MAX;
+ return entry;
}
- return entry;
- }
- if (mas_is_none(mas)) {
- mas->index = 0;
+ mas->index = 1;
mas->last = ULONG_MAX;
+ mas->node = MAS_NONE;
+ return NULL;
}
return entry;
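
The reworked mas_walk() now normalizes its terminal states: a paused or MAS_NONE state restarts, and a miss on an empty tree reports the full empty range. A sketch of the empty-tree case (matching the updated check_root_expand() test further below):

    MA_STATE(mas, &tree, 3, 3);   /* empty tree assumed */
    void *entry;

    rcu_read_lock();
    entry = mas_walk(&mas);
    /* entry == NULL, mas.index == 0, mas.last == ULONG_MAX */
    rcu_read_unlock();
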
@@ -5289,7 +5208,10 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
unsigned long *pivots;
enum maple_type mt;
- if (min >= max)
+ if (min > max)
+ return -EINVAL;
+
+ if (size == 0 || max - min < size - 1)
return -EINVAL;
if (mas_is_start(mas))
@@ -5317,15 +5239,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
mt = mte_node_type(mas->node);
pivots = ma_pivots(mas_mn(mas), mt);
- if (offset)
- mas->min = pivots[offset - 1] + 1;
-
- if (offset < mt_pivots[mt])
- mas->max = pivots[offset];
-
- if (mas->index < mas->min)
- mas->index = mas->min;
-
+ min = mas_safe_min(mas, pivots, offset);
+ if (mas->index < min)
+ mas->index = min;
mas->last = mas->index + size - 1;
return 0;
}
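
The gap search now accepts min == max and rejects impossible requests up front; the same two checks are added to mas_empty_area_rev() in the next hunk. Illustrative calls (write lock assumed, return values per the new checks):

    MA_STATE(mas, &tree, 0, 0);

    mas_empty_area(&mas, 10, 5, 1);  /* -EINVAL: min > max */
    mas_empty_area(&mas, 0, 2, 4);   /* -EINVAL: max - min < size - 1 */
    mas_empty_area(&mas, 5, 5, 1);   /* single-slot request is now legal */
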
@@ -5344,7 +5260,10 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
{
struct maple_enode *last = mas->node;
- if (min >= max)
+ if (min > max)
+ return -EINVAL;
+
+ if (size == 0 || max - min < size - 1)
return -EINVAL;
if (mas_is_start(mas)) {
@@ -5380,7 +5299,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
return -EBUSY;
/* Trim the upper limit to the max. */
- if (max <= mas->last)
+ if (max < mas->last)
mas->last = max;
mas->index = mas->last - size + 1;
@@ -5400,8 +5319,8 @@ static inline int mas_alloc(struct ma_state *mas, void *entry,
return xa_err(mas->node);
if (!mas->index)
- return mte_pivot(mas->node, 0);
- return mte_pivot(mas->node, 1);
+ return mas_pivot(mas, 0);
+ return mas_pivot(mas, 1);
}
/* Must be walking a tree. */
@@ -5418,7 +5337,10 @@ static inline int mas_alloc(struct ma_state *mas, void *entry,
*/
min = mas->min;
if (mas->offset)
- min = mte_pivot(mas->node, mas->offset - 1) + 1;
+ min = mas_pivot(mas, mas->offset - 1) + 1;
+
+ if (mas_is_err(mas))
+ return xa_err(mas->node);
if (mas->index < min)
mas->index = min;
@@ -5700,9 +5622,9 @@ void *mas_store(struct ma_state *mas, void *entry)
trace_ma_write(__func__, mas, 0, entry);
#ifdef CONFIG_DEBUG_MAPLE_TREE
- if (mas->index > mas->last)
- pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry);
- MT_BUG_ON(mas->tree, mas->index > mas->last);
+ if (MAS_WARN_ON(mas, mas->index > mas->last))
+ pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
+
if (mas->index > mas->last) {
mas_set_err(mas, -EINVAL);
return NULL;
@@ -5762,7 +5684,7 @@ void mas_store_prealloc(struct ma_state *mas, void *entry)
mas_wr_store_setup(&wr_mas);
trace_ma_write(__func__, mas, 0, entry);
mas_wr_store_entry(&wr_mas);
- BUG_ON(mas_is_err(mas));
+ MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
mas_destroy(mas);
}
EXPORT_SYMBOL_GPL(mas_store_prealloc);
@@ -5814,9 +5736,7 @@ void mas_destroy(struct ma_state *mas)
if (mas->mas_flags & MA_STATE_REBALANCE) {
unsigned char end;
- if (mas_is_start(mas))
- mas_start(mas);
-
+ mas_start(mas);
mtree_range_walk(mas);
end = mas_data_end(mas) + 1;
if (end < mt_min_slot_count(mas->node) - 1)
@@ -5906,6 +5826,34 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
}
EXPORT_SYMBOL_GPL(mas_expected_entries);
+static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
+ void **entry)
+{
+ bool was_none = mas_is_none(mas);
+
+ if (mas_is_none(mas) || mas_is_paused(mas))
+ mas->node = MAS_START;
+
+ if (mas_is_start(mas))
+ *entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
+
+ if (mas_is_ptr(mas)) {
+ *entry = NULL;
+ if (was_none && mas->index == 0) {
+ mas->index = mas->last = 0;
+ return true;
+ }
+ mas->index = 1;
+ mas->last = ULONG_MAX;
+ mas->node = MAS_NONE;
+ return true;
+ }
+
+ if (mas_is_none(mas))
+ return true;
+ return false;
+}
+
/**
* mas_next() - Get the next entry.
* @mas: The maple state
@@ -5919,27 +5867,38 @@ EXPORT_SYMBOL_GPL(mas_expected_entries);
*/
void *mas_next(struct ma_state *mas, unsigned long max)
{
- if (mas_is_none(mas) || mas_is_paused(mas))
- mas->node = MAS_START;
+ void *entry = NULL;
- if (mas_is_start(mas))
- mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
+ if (mas_next_setup(mas, max, &entry))
+ return entry;
- if (mas_is_ptr(mas)) {
- if (!mas->index) {
- mas->index = 1;
- mas->last = ULONG_MAX;
- }
- return NULL;
- }
+ /* Retries on dead nodes handled by mas_next_slot */
+ return mas_next_slot(mas, max, false);
+}
+EXPORT_SYMBOL_GPL(mas_next);
- if (mas->last == ULONG_MAX)
- return NULL;
+/**
+ * mas_next_range() - Advance the maple state to the next range
+ * @mas: The maple state
+ * @max: The maximum index to check.
+ *
+ * Sets @mas->index and @mas->last to the range.
+ * Must hold rcu_read_lock or the write lock.
+ * Can return the zero entry.
+ *
+ * Return: The next entry or %NULL
+ */
+void *mas_next_range(struct ma_state *mas, unsigned long max)
+{
+ void *entry = NULL;
+
+ if (mas_next_setup(mas, max, &entry))
+ return entry;
- /* Retries on dead nodes handled by mas_next_entry */
- return mas_next_entry(mas, max);
+ /* Retries on dead nodes handled by mas_next_slot */
+ return mas_next_slot(mas, max, true);
}
-EXPORT_SYMBOL_GPL(mas_next);
+EXPORT_SYMBOL_GPL(mas_next_range);
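
The difference between the two exported iterators, as a sketch: mas_next() skips empty ranges while the new mas_next_range() stops on every range, possibly returning NULL for a gap (rcu_read_lock() assumed held):

    MA_STATE(mas, &tree, 0, 0);
    void *entry;

    entry = mas_next(&mas, ULONG_MAX);       /* next non-NULL entry */
    entry = mas_next_range(&mas, ULONG_MAX); /* next range; may be NULL */
    /* Either way, mas.index/mas.last span the range just visited. */
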
/**
* mt_next() - get the next value in the maple tree
@@ -5961,6 +5920,47 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
}
EXPORT_SYMBOL_GPL(mt_next);
+static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
+ void **entry)
+{
+ if (mas->index <= min)
+ goto none;
+
+ if (mas_is_none(mas) || mas_is_paused(mas))
+ mas->node = MAS_START;
+
+ if (mas_is_start(mas)) {
+ mas_walk(mas);
+ if (!mas->index)
+ goto none;
+ }
+
+ if (unlikely(mas_is_ptr(mas))) {
+ if (!mas->index)
+ goto none;
+ mas->index = mas->last = 0;
+ *entry = mas_root(mas);
+ return true;
+ }
+
+ if (mas_is_none(mas)) {
+ if (mas->index) {
+ /* Walked to out-of-range pointer? */
+ mas->index = mas->last = 0;
+ mas->node = MAS_ROOT;
+ *entry = mas_root(mas);
+ return true;
+ }
+ return true;
+ }
+
+ return false;
+
+none:
+ mas->node = MAS_NONE;
+ return true;
+}
+
/**
* mas_prev() - Get the previous entry
* @mas: The maple state
@@ -5974,37 +5974,37 @@ EXPORT_SYMBOL_GPL(mt_next);
*/
void *mas_prev(struct ma_state *mas, unsigned long min)
{
- if (!mas->index) {
- /* Nothing comes before 0 */
- mas->last = 0;
- mas->node = MAS_NONE;
- return NULL;
- }
+ void *entry = NULL;
- if (unlikely(mas_is_ptr(mas)))
- return NULL;
+ if (mas_prev_setup(mas, min, &entry))
+ return entry;
- if (mas_is_none(mas) || mas_is_paused(mas))
- mas->node = MAS_START;
+ return mas_prev_slot(mas, min, false);
+}
+EXPORT_SYMBOL_GPL(mas_prev);
- if (mas_is_start(mas)) {
- mas_walk(mas);
- if (!mas->index)
- return NULL;
- }
+/**
+ * mas_prev_range() - Advance to the previous range
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Sets @mas->index and @mas->last to the range.
+ * Must hold rcu_read_lock or the write lock.
+ * Will reset mas to MAS_START if the node is MAS_NONE.  Will stop on
+ * unsearchable nodes.
+ *
+ * Return: the previous value or %NULL.
+ */
+void *mas_prev_range(struct ma_state *mas, unsigned long min)
+{
+ void *entry = NULL;
- if (mas_is_ptr(mas)) {
- if (!mas->index) {
- mas->last = 0;
- return NULL;
- }
+ if (mas_prev_setup(mas, min, &entry))
+ return entry;
- mas->index = mas->last = 0;
- return mas_root_locked(mas);
- }
- return mas_prev_entry(mas, min);
+ return mas_prev_slot(mas, min, true);
}
-EXPORT_SYMBOL_GPL(mas_prev);
+EXPORT_SYMBOL_GPL(mas_prev_range);
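
The reverse pair mirrors this, stepping down toward @min (again a sketch under rcu_read_lock()):

    MA_STATE(mas, &tree, ULONG_MAX, ULONG_MAX);
    void *entry;

    entry = mas_prev(&mas, 0);       /* previous non-NULL entry */
    entry = mas_prev_range(&mas, 0); /* previous range; may be NULL */
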
/**
* mt_prev() - get the previous value in the maple tree
@@ -6046,6 +6046,61 @@ void mas_pause(struct ma_state *mas)
EXPORT_SYMBOL_GPL(mas_pause);
/**
+ * mas_find_setup() - Internal function to set up mas_find*().
+ * @mas: The maple state
+ * @max: The maximum index
+ * @entry: Pointer to the entry
+ *
+ * Returns: True if entry is the answer, false otherwise.
+ */
+static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
+ void **entry)
+{
+ *entry = NULL;
+
+ if (unlikely(mas_is_none(mas))) {
+ if (unlikely(mas->last >= max))
+ return true;
+
+ mas->index = mas->last;
+ mas->node = MAS_START;
+ } else if (unlikely(mas_is_paused(mas))) {
+ if (unlikely(mas->last >= max))
+ return true;
+
+ mas->node = MAS_START;
+ mas->index = ++mas->last;
+ } else if (unlikely(mas_is_ptr(mas)))
+ goto ptr_out_of_range;
+
+ if (unlikely(mas_is_start(mas))) {
+ /* First run or continue */
+ if (mas->index > max)
+ return true;
+
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+	}
+
+ if (unlikely(!mas_searchable(mas))) {
+ if (unlikely(mas_is_ptr(mas)))
+ goto ptr_out_of_range;
+
+ return true;
+ }
+
+ if (mas->index == max)
+ return true;
+
+ return false;
+
+ptr_out_of_range:
+ mas->node = MAS_NONE;
+ mas->index = 1;
+ mas->last = ULONG_MAX;
+ return true;
+}
+
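
The paused branch above resumes at last + 1, which is what makes the canonical drop-lock iteration pattern work; a minimal sketch (need_resched()/cond_resched() as in typical callers):

    MA_STATE(mas, &tree, 0, 0);
    void *entry;

    rcu_read_lock();
    mas_for_each(&mas, entry, ULONG_MAX) {
        if (need_resched()) {
            mas_pause(&mas);     /* remember mas.last */
            rcu_read_unlock();
            cond_resched();
            rcu_read_lock();     /* next mas_find() resumes at last + 1 */
        }
        /* process entry */
    }
    rcu_read_unlock();
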
+/**
* mas_find() - On the first call, find the entry at or after mas->index up to
* %max. Otherwise, find the entry after mas->index.
* @mas: The maple state
@@ -6059,37 +6114,102 @@ EXPORT_SYMBOL_GPL(mas_pause);
*/
void *mas_find(struct ma_state *mas, unsigned long max)
{
+ void *entry = NULL;
+
+ if (mas_find_setup(mas, max, &entry))
+ return entry;
+
+ /* Retries on dead nodes handled by mas_next_slot */
+ return mas_next_slot(mas, max, false);
+}
+EXPORT_SYMBOL_GPL(mas_find);
+
+/**
+ * mas_find_range() - On the first call, find the entry at or after
+ * mas->index up to %max. Otherwise, advance to the next slot after mas->index.
+ * @mas: The maple state
+ * @max: The maximum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * If an entry exists, last and index are updated accordingly.
+ * May set @mas->node to MAS_NONE.
+ *
+ * Return: The entry or %NULL.
+ */
+void *mas_find_range(struct ma_state *mas, unsigned long max)
+{
+ void *entry;
+
+ if (mas_find_setup(mas, max, &entry))
+ return entry;
+
+ /* Retries on dead nodes handled by mas_next_slot */
+ return mas_next_slot(mas, max, true);
+}
+EXPORT_SYMBOL_GPL(mas_find_range);
+
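
mas_find_range() differs from mas_find() exactly as the next/next_range pair above: it also stops on gaps. Sketch:

    MA_STATE(mas, &tree, 0, 0);
    void *entry;

    rcu_read_lock();
    entry = mas_find_range(&mas, ULONG_MAX);
    /* mas.index/mas.last are valid even when entry is NULL (a gap). */
    rcu_read_unlock();
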
+/**
+ * mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
+ * @mas: The maple state
+ * @min: The minimum index
+ * @entry: Pointer to the entry
+ *
+ * Returns: True if entry is the answer, false otherwise.
+ */
+static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
+ void **entry)
+{
+ *entry = NULL;
+
+ if (unlikely(mas_is_none(mas))) {
+ if (mas->index <= min)
+ goto none;
+
+ mas->last = mas->index;
+ mas->node = MAS_START;
+ }
+
if (unlikely(mas_is_paused(mas))) {
- if (unlikely(mas->last == ULONG_MAX)) {
+ if (unlikely(mas->index <= min)) {
mas->node = MAS_NONE;
- return NULL;
+ return true;
}
mas->node = MAS_START;
- mas->index = ++mas->last;
+ mas->last = --mas->index;
}
- if (unlikely(mas_is_none(mas)))
- mas->node = MAS_START;
-
if (unlikely(mas_is_start(mas))) {
/* First run or continue */
- void *entry;
+ if (mas->index < min)
+ return true;
- if (mas->index > max)
- return NULL;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ }
- entry = mas_walk(mas);
- if (entry)
- return entry;
+ if (unlikely(!mas_searchable(mas))) {
+ if (mas_is_ptr(mas))
+ goto none;
+
+ if (mas_is_none(mas)) {
+ /*
+ * Walked to the location, and there was nothing so the
+ * previous location is 0.
+ */
+ mas->last = mas->index = 0;
+ mas->node = MAS_ROOT;
+ *entry = mas_root(mas);
+ return true;
+ }
}
- if (unlikely(!mas_searchable(mas)))
- return NULL;
+ if (mas->index < min)
+ return true;
+
+ return false;
- /* Retries on dead nodes handled by mas_next_entry */
- return mas_next_entry(mas, max);
+none:
+ mas->node = MAS_NONE;
+ return true;
}
-EXPORT_SYMBOL_GPL(mas_find);
/**
* mas_find_rev: On the first call, find the first non-null entry at or below
@@ -6106,37 +6226,41 @@ EXPORT_SYMBOL_GPL(mas_find);
*/
void *mas_find_rev(struct ma_state *mas, unsigned long min)
{
- if (unlikely(mas_is_paused(mas))) {
- if (unlikely(mas->last == ULONG_MAX)) {
- mas->node = MAS_NONE;
- return NULL;
- }
- mas->node = MAS_START;
- mas->last = --mas->index;
- }
+ void *entry;
- if (unlikely(mas_is_start(mas))) {
- /* First run or continue */
- void *entry;
+ if (mas_find_rev_setup(mas, min, &entry))
+ return entry;
- if (mas->index < min)
- return NULL;
+ /* Retries on dead nodes handled by mas_prev_slot */
+ return mas_prev_slot(mas, min, false);
- entry = mas_walk(mas);
- if (entry)
- return entry;
- }
+}
+EXPORT_SYMBOL_GPL(mas_find_rev);
- if (unlikely(!mas_searchable(mas)))
- return NULL;
+/**
+ * mas_find_range_rev: On the first call, find the first non-null entry at or
+ * below mas->index down to %min. Otherwise advance to the previous slot after
+ * mas->index down to %min.
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * If an entry exists, last and index are updated accordingly.
+ * May set @mas->node to MAS_NONE.
+ *
+ * Return: The entry or %NULL.
+ */
+void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
+{
+ void *entry;
- if (mas->index < min)
- return NULL;
+ if (mas_find_rev_setup(mas, min, &entry))
+ return entry;
- /* Retries on dead nodes handled by mas_prev_entry */
- return mas_prev_entry(mas, min);
+ /* Retries on dead nodes handled by mas_prev_slot */
+ return mas_prev_slot(mas, min, true);
}
-EXPORT_SYMBOL_GPL(mas_find_rev);
+EXPORT_SYMBOL_GPL(mas_find_range_rev);
/**
* mas_erase() - Find the range in which index resides and erase the entire
@@ -6363,7 +6487,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
{
int ret = 0;
- MA_STATE(mas, mt, min, max - size);
+ MA_STATE(mas, mt, min, min);
if (!mt_is_alloc(mt))
return -EINVAL;
@@ -6383,7 +6507,7 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
retry:
mas.offset = 0;
mas.index = min;
- mas.last = max - size;
+ mas.last = max - size + 1;
ret = mas_alloc(&mas, entry, size, startp);
if (mas_nomem(&mas, gfp))
goto retry;
@@ -6399,14 +6523,14 @@ int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
{
int ret = 0;
- MA_STATE(mas, mt, min, max - size);
+ MA_STATE(mas, mt, min, max - size + 1);
if (!mt_is_alloc(mt))
return -EINVAL;
if (WARN_ON_ONCE(mt_is_reserved(entry)))
return -EINVAL;
- if (min >= max)
+ if (min > max)
return -EINVAL;
if (max < size - 1)
@@ -6518,7 +6642,7 @@ retry:
if (entry)
goto unlock;
- while (mas_searchable(&mas) && (mas.index < max)) {
+ while (mas_searchable(&mas) && (mas.last < max)) {
entry = mas_next_entry(&mas, max);
if (likely(entry && !xa_is_zero(entry)))
break;
@@ -6531,10 +6655,9 @@ unlock:
if (likely(entry)) {
*index = mas.last + 1;
#ifdef CONFIG_DEBUG_MAPLE_TREE
- if ((*index) && (*index) <= copy)
+ if (MT_WARN_ON(mt, (*index) && ((*index) <= copy)))
pr_err("index not increased! %lx <= %lx\n",
*index, copy);
- MT_BUG_ON(mt, (*index) && ((*index) <= copy));
#endif
}
@@ -6680,7 +6803,7 @@ static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
max = mas->max;
mas->offset = 0;
while (likely(!ma_is_leaf(mt))) {
- MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
+ MAS_WARN_ON(mas, mte_dead_node(mas->node));
slots = ma_slots(mn, mt);
entry = mas_slot(mas, slots, 0);
pivots = ma_pivots(mn, mt);
@@ -6691,7 +6814,7 @@ static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
mn = mas_mn(mas);
mt = mte_node_type(mas->node);
}
- MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
+ MAS_WARN_ON(mas, mte_dead_node(mas->node));
mas->max = max;
slots = ma_slots(mn, mt);
@@ -6741,15 +6864,12 @@ static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
mas->node = mn;
mas_ascend(mas);
- while (mas->node != MAS_NONE) {
+ do {
p = mas->node;
p_min = mas->min;
p_max = mas->max;
mas_prev_node(mas, 0);
- }
-
- if (p == MAS_NONE)
- return;
+ } while (!mas_is_none(mas));
mas->node = p;
mas->max = p_max;
@@ -6758,22 +6878,33 @@ static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
/* Tree validations */
static void mt_dump_node(const struct maple_tree *mt, void *entry,
- unsigned long min, unsigned long max, unsigned int depth);
+ unsigned long min, unsigned long max, unsigned int depth,
+ enum mt_dump_format format);
static void mt_dump_range(unsigned long min, unsigned long max,
- unsigned int depth)
+ unsigned int depth, enum mt_dump_format format)
{
static const char spaces[] = " ";
- if (min == max)
- pr_info("%.*s%lu: ", depth * 2, spaces, min);
- else
- pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
+ switch(format) {
+ case mt_dump_hex:
+ if (min == max)
+ pr_info("%.*s%lx: ", depth * 2, spaces, min);
+ else
+ pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
+ break;
+ default:
+ case mt_dump_dec:
+ if (min == max)
+ pr_info("%.*s%lu: ", depth * 2, spaces, min);
+ else
+ pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
+ }
}
static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
- unsigned int depth)
+ unsigned int depth, enum mt_dump_format format)
{
- mt_dump_range(min, max, depth);
+ mt_dump_range(min, max, depth, format);
if (xa_is_value(entry))
pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
@@ -6787,7 +6918,8 @@ static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
}
static void mt_dump_range64(const struct maple_tree *mt, void *entry,
- unsigned long min, unsigned long max, unsigned int depth)
+ unsigned long min, unsigned long max, unsigned int depth,
+ enum mt_dump_format format)
{
struct maple_range_64 *node = &mte_to_node(entry)->mr64;
bool leaf = mte_is_leaf(entry);
@@ -6795,8 +6927,16 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry,
int i;
pr_cont(" contents: ");
- for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++)
- pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
+ for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
+ switch(format) {
+ case mt_dump_hex:
+ pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
+ break;
+ default:
+ case mt_dump_dec:
+ pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
+ }
+ }
pr_cont("%p\n", node->slot[i]);
for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
unsigned long last = max;
@@ -6809,24 +6949,32 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry,
break;
if (leaf)
mt_dump_entry(mt_slot(mt, node->slot, i),
- first, last, depth + 1);
+ first, last, depth + 1, format);
else if (node->slot[i])
mt_dump_node(mt, mt_slot(mt, node->slot, i),
- first, last, depth + 1);
+ first, last, depth + 1, format);
if (last == max)
break;
if (last > max) {
- pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ switch(format) {
+ case mt_dump_hex:
+ pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
node, last, max, i);
- break;
+ break;
+ default:
+ case mt_dump_dec:
+ pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ node, last, max, i);
+ }
}
first = last + 1;
}
}
static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
- unsigned long min, unsigned long max, unsigned int depth)
+ unsigned long min, unsigned long max, unsigned int depth,
+ enum mt_dump_format format)
{
struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
bool leaf = mte_is_leaf(entry);
@@ -6851,10 +6999,10 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
break;
if (leaf)
mt_dump_entry(mt_slot(mt, node->slot, i),
- first, last, depth + 1);
+ first, last, depth + 1, format);
else if (node->slot[i])
mt_dump_node(mt, mt_slot(mt, node->slot, i),
- first, last, depth + 1);
+ first, last, depth + 1, format);
if (last == max)
break;
@@ -6868,13 +7016,14 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
}
static void mt_dump_node(const struct maple_tree *mt, void *entry,
- unsigned long min, unsigned long max, unsigned int depth)
+ unsigned long min, unsigned long max, unsigned int depth,
+ enum mt_dump_format format)
{
struct maple_node *node = mte_to_node(entry);
unsigned int type = mte_node_type(entry);
unsigned int i;
- mt_dump_range(min, max, depth);
+ mt_dump_range(min, max, depth, format);
pr_cont("node %p depth %d type %d parent %p", node, depth, type,
node ? node->parent : NULL);
@@ -6885,15 +7034,15 @@ static void mt_dump_node(const struct maple_tree *mt, void *entry,
if (min + i > max)
pr_cont("OUT OF RANGE: ");
mt_dump_entry(mt_slot(mt, node->slot, i),
- min + i, min + i, depth);
+ min + i, min + i, depth, format);
}
break;
case maple_leaf_64:
case maple_range_64:
- mt_dump_range64(mt, entry, min, max, depth);
+ mt_dump_range64(mt, entry, min, max, depth, format);
break;
case maple_arange_64:
- mt_dump_arange64(mt, entry, min, max, depth);
+ mt_dump_arange64(mt, entry, min, max, depth, format);
break;
default:
@@ -6901,16 +7050,16 @@ static void mt_dump_node(const struct maple_tree *mt, void *entry,
}
}
-void mt_dump(const struct maple_tree *mt)
+void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
{
void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
pr_info("maple_tree(%p) flags %X, height %u root %p\n",
mt, mt->ma_flags, mt_height(mt), entry);
if (!xa_is_node(entry))
- mt_dump_entry(entry, 0, 0, 0);
+ mt_dump_entry(entry, 0, 0, 0, format);
else if (entry)
- mt_dump_node(mt, entry, 0, mt_node_max(entry), 0);
+ mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
}
EXPORT_SYMBOL_GPL(mt_dump);
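
The dump format selector is an enum (mt_dump_format, defined in the header alongside this change); callers now pick the radix:

    mt_dump(&tree, mt_dump_hex);  /* ranges and pivots in hex */
    mt_dump(&tree, mt_dump_dec);  /* previous decimal output */
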
@@ -6963,7 +7112,7 @@ static void mas_validate_gaps(struct ma_state *mas)
mas_mn(mas), i,
mas_get_slot(mas, i), gap,
p_end, p_start);
- mt_dump(mas->tree);
+ mt_dump(mas->tree, mt_dump_hex);
MT_BUG_ON(mas->tree,
gap != p_end - p_start + 1);
@@ -6994,27 +7143,29 @@ counted:
p_slot = mte_parent_slot(mas->node);
p_mn = mte_parent(mte);
MT_BUG_ON(mas->tree, max_gap > mas->max);
- if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) {
+ if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
- mt_dump(mas->tree);
+ mt_dump(mas->tree, mt_dump_hex);
}
MT_BUG_ON(mas->tree,
- ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap);
+ ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap);
}
static void mas_validate_parent_slot(struct ma_state *mas)
{
struct maple_node *parent;
struct maple_enode *node;
- enum maple_type p_type = mas_parent_enum(mas, mas->node);
- unsigned char p_slot = mte_parent_slot(mas->node);
+ enum maple_type p_type;
+ unsigned char p_slot;
void __rcu **slots;
int i;
if (mte_is_root(mas->node))
return;
+ p_slot = mte_parent_slot(mas->node);
+ p_type = mas_parent_type(mas, mas->node);
parent = mte_parent(mas->node);
slots = ma_slots(parent, p_type);
MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
@@ -7107,18 +7258,18 @@ static void mas_validate_limits(struct ma_state *mas)
if (prev_piv > piv) {
pr_err("%p[%u] piv %lu < prev_piv %lu\n",
mas_mn(mas), i, piv, prev_piv);
- MT_BUG_ON(mas->tree, piv < prev_piv);
+ MAS_WARN_ON(mas, piv < prev_piv);
}
if (piv < mas->min) {
pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
piv, mas->min);
- MT_BUG_ON(mas->tree, piv < mas->min);
+ MAS_WARN_ON(mas, piv < mas->min);
}
if (piv > mas->max) {
pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
piv, mas->max);
- MT_BUG_ON(mas->tree, piv > mas->max);
+ MAS_WARN_ON(mas, piv > mas->max);
}
prev_piv = piv;
if (piv == mas->max)
@@ -7141,7 +7292,7 @@ static void mas_validate_limits(struct ma_state *mas)
pr_err("%p[%u] should not have piv %lu\n",
mas_mn(mas), i, piv);
- MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1);
+ MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
}
}
}
@@ -7200,16 +7351,15 @@ void mt_validate(struct maple_tree *mt)
mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
while (!mas_is_none(&mas)) {
- MT_BUG_ON(mas.tree, mte_dead_node(mas.node));
+ MAS_WARN_ON(&mas, mte_dead_node(mas.node));
if (!mte_is_root(mas.node)) {
end = mas_data_end(&mas);
- if ((end < mt_min_slot_count(mas.node)) &&
- (mas.max != ULONG_MAX)) {
+ if (MAS_WARN_ON(&mas,
+ (end < mt_min_slot_count(mas.node)) &&
+ (mas.max != ULONG_MAX))) {
pr_err("Invalid size %u of %p\n", end,
- mas_mn(&mas));
- MT_BUG_ON(mas.tree, 1);
+ mas_mn(&mas));
}
-
}
mas_validate_parent_slot(&mas);
mas_validate_child_slot(&mas);
@@ -7225,4 +7375,34 @@ done:
}
EXPORT_SYMBOL_GPL(mt_validate);
+void mas_dump(const struct ma_state *mas)
+{
+ pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
+ if (mas_is_none(mas))
+ pr_err("(MAS_NONE) ");
+ else if (mas_is_ptr(mas))
+ pr_err("(MAS_ROOT) ");
+ else if (mas_is_start(mas))
+ pr_err("(MAS_START) ");
+ else if (mas_is_paused(mas))
+ pr_err("(MAS_PAUSED) ");
+
+ pr_err("[%u] index=%lx last=%lx\n", mas->offset, mas->index, mas->last);
+ pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
+ mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
+ if (mas->index > mas->last)
+ pr_err("Check index & last\n");
+}
+EXPORT_SYMBOL_GPL(mas_dump);
+
+void mas_wr_dump(const struct ma_wr_state *wr_mas)
+{
+ pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
+ wr_mas->node, wr_mas->r_min, wr_mas->r_max);
+ pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
+ wr_mas->type, wr_mas->offset_end, wr_mas->node_end,
+ wr_mas->end_piv);
+}
+EXPORT_SYMBOL_GPL(mas_wr_dump);
+
#endif /* CONFIG_DEBUG_MAPLE_TREE */
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index f1db333270e9..9939be34e516 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -11,12 +11,33 @@
#include <linux/module.h>
#define MTREE_ALLOC_MAX 0x2000000000000Ul
-#ifndef CONFIG_DEBUG_MAPLE_TREE
-#define CONFIG_DEBUG_MAPLE_TREE
-#endif
#define CONFIG_MAPLE_SEARCH
#define MAPLE_32BIT (MAPLE_NODE_SLOTS > 31)
+#ifndef CONFIG_DEBUG_MAPLE_TREE
+#define mt_dump(mt, fmt) do {} while (0)
+#define mt_validate(mt) do {} while (0)
+#define mt_cache_shrink() do {} while (0)
+#define mas_dump(mas) do {} while (0)
+#define mas_wr_dump(mas) do {} while (0)
+atomic_t maple_tree_tests_run;
+atomic_t maple_tree_tests_passed;
+#undef MT_BUG_ON
+
+#define MT_BUG_ON(__tree, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+#endif
+
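
Outside CONFIG_DEBUG_MAPLE_TREE the redefined MT_BUG_ON() above turns each assertion into a counted test rather than a BUG(); for example:

    MT_BUG_ON(mt, mtree_load(mt, 5) != NULL);
    /* Bumps maple_tree_tests_run; on success also bumps
     * maple_tree_tests_passed, on failure logs the location
     * and the pass/run tally instead of crashing. */
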
/* #define BENCH_SLOT_STORE */
/* #define BENCH_NODE_STORE */
/* #define BENCH_AWALK */
@@ -30,54 +51,54 @@
#else
#define cond_resched() do {} while (0)
#endif
-static
-int mtree_insert_index(struct maple_tree *mt, unsigned long index, gfp_t gfp)
+static int __init mtree_insert_index(struct maple_tree *mt,
+ unsigned long index, gfp_t gfp)
{
return mtree_insert(mt, index, xa_mk_value(index & LONG_MAX), gfp);
}
-static void mtree_erase_index(struct maple_tree *mt, unsigned long index)
+static void __init mtree_erase_index(struct maple_tree *mt, unsigned long index)
{
MT_BUG_ON(mt, mtree_erase(mt, index) != xa_mk_value(index & LONG_MAX));
MT_BUG_ON(mt, mtree_load(mt, index) != NULL);
}
-static int mtree_test_insert(struct maple_tree *mt, unsigned long index,
+static int __init mtree_test_insert(struct maple_tree *mt, unsigned long index,
void *ptr)
{
return mtree_insert(mt, index, ptr, GFP_KERNEL);
}
-static int mtree_test_store_range(struct maple_tree *mt, unsigned long start,
- unsigned long end, void *ptr)
+static int __init mtree_test_store_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, void *ptr)
{
return mtree_store_range(mt, start, end, ptr, GFP_KERNEL);
}
-static int mtree_test_store(struct maple_tree *mt, unsigned long start,
+static int __init mtree_test_store(struct maple_tree *mt, unsigned long start,
void *ptr)
{
return mtree_test_store_range(mt, start, start, ptr);
}
-static int mtree_test_insert_range(struct maple_tree *mt, unsigned long start,
- unsigned long end, void *ptr)
+static int __init mtree_test_insert_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, void *ptr)
{
return mtree_insert_range(mt, start, end, ptr, GFP_KERNEL);
}
-static void *mtree_test_load(struct maple_tree *mt, unsigned long index)
+static void __init *mtree_test_load(struct maple_tree *mt, unsigned long index)
{
return mtree_load(mt, index);
}
-static void *mtree_test_erase(struct maple_tree *mt, unsigned long index)
+static void __init *mtree_test_erase(struct maple_tree *mt, unsigned long index)
{
return mtree_erase(mt, index);
}
#if defined(CONFIG_64BIT)
-static noinline void check_mtree_alloc_range(struct maple_tree *mt,
+static noinline void __init check_mtree_alloc_range(struct maple_tree *mt,
unsigned long start, unsigned long end, unsigned long size,
unsigned long expected, int eret, void *ptr)
{
@@ -94,7 +115,7 @@ static noinline void check_mtree_alloc_range(struct maple_tree *mt,
MT_BUG_ON(mt, result != expected);
}
-static noinline void check_mtree_alloc_rrange(struct maple_tree *mt,
+static noinline void __init check_mtree_alloc_rrange(struct maple_tree *mt,
unsigned long start, unsigned long end, unsigned long size,
unsigned long expected, int eret, void *ptr)
{
@@ -102,7 +123,7 @@ static noinline void check_mtree_alloc_rrange(struct maple_tree *mt,
unsigned long result = expected + 1;
int ret;
- ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end - 1,
+ ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end,
GFP_KERNEL);
MT_BUG_ON(mt, ret != eret);
if (ret)
@@ -112,8 +133,8 @@ static noinline void check_mtree_alloc_rrange(struct maple_tree *mt,
}
#endif
-static noinline void check_load(struct maple_tree *mt, unsigned long index,
- void *ptr)
+static noinline void __init check_load(struct maple_tree *mt,
+ unsigned long index, void *ptr)
{
void *ret = mtree_test_load(mt, index);
@@ -122,7 +143,7 @@ static noinline void check_load(struct maple_tree *mt, unsigned long index,
MT_BUG_ON(mt, ret != ptr);
}
-static noinline void check_store_range(struct maple_tree *mt,
+static noinline void __init check_store_range(struct maple_tree *mt,
unsigned long start, unsigned long end, void *ptr, int expected)
{
int ret = -EINVAL;
@@ -138,7 +159,7 @@ static noinline void check_store_range(struct maple_tree *mt,
check_load(mt, i, ptr);
}
-static noinline void check_insert_range(struct maple_tree *mt,
+static noinline void __init check_insert_range(struct maple_tree *mt,
unsigned long start, unsigned long end, void *ptr, int expected)
{
int ret = -EINVAL;
@@ -154,8 +175,8 @@ static noinline void check_insert_range(struct maple_tree *mt,
check_load(mt, i, ptr);
}
-static noinline void check_insert(struct maple_tree *mt, unsigned long index,
- void *ptr)
+static noinline void __init check_insert(struct maple_tree *mt,
+ unsigned long index, void *ptr)
{
int ret = -EINVAL;
@@ -163,7 +184,7 @@ static noinline void check_insert(struct maple_tree *mt, unsigned long index,
MT_BUG_ON(mt, ret != 0);
}
-static noinline void check_dup_insert(struct maple_tree *mt,
+static noinline void __init check_dup_insert(struct maple_tree *mt,
unsigned long index, void *ptr)
{
int ret = -EINVAL;
@@ -173,13 +194,13 @@ static noinline void check_dup_insert(struct maple_tree *mt,
}
-static noinline
-void check_index_load(struct maple_tree *mt, unsigned long index)
+static noinline void __init check_index_load(struct maple_tree *mt,
+ unsigned long index)
{
return check_load(mt, index, xa_mk_value(index & LONG_MAX));
}
-static inline int not_empty(struct maple_node *node)
+static inline __init int not_empty(struct maple_node *node)
{
int i;
@@ -194,8 +215,8 @@ static inline int not_empty(struct maple_node *node)
}
-static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max,
- bool verbose)
+static noinline void __init check_rev_seq(struct maple_tree *mt,
+ unsigned long max, bool verbose)
{
unsigned long i = max, j;
@@ -219,7 +240,7 @@ static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max,
#ifndef __KERNEL__
if (verbose) {
rcu_barrier();
- mt_dump(mt);
+ mt_dump(mt, mt_dump_dec);
pr_info(" %s test of 0-%lu %luK in %d active (%d total)\n",
__func__, max, mt_get_alloc_size()/1024, mt_nr_allocated(),
mt_nr_tallocated());
@@ -227,7 +248,7 @@ static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max,
#endif
}
-static noinline void check_seq(struct maple_tree *mt, unsigned long max,
+static noinline void __init check_seq(struct maple_tree *mt, unsigned long max,
bool verbose)
{
unsigned long i, j;
@@ -248,7 +269,7 @@ static noinline void check_seq(struct maple_tree *mt, unsigned long max,
#ifndef __KERNEL__
if (verbose) {
rcu_barrier();
- mt_dump(mt);
+ mt_dump(mt, mt_dump_dec);
pr_info(" seq test of 0-%lu %luK in %d active (%d total)\n",
max, mt_get_alloc_size()/1024, mt_nr_allocated(),
mt_nr_tallocated());
@@ -256,7 +277,7 @@ static noinline void check_seq(struct maple_tree *mt, unsigned long max,
#endif
}
-static noinline void check_lb_not_empty(struct maple_tree *mt)
+static noinline void __init check_lb_not_empty(struct maple_tree *mt)
{
unsigned long i, j;
unsigned long huge = 4000UL * 1000 * 1000;
@@ -275,13 +296,13 @@ static noinline void check_lb_not_empty(struct maple_tree *mt)
mtree_destroy(mt);
}
-static noinline void check_lower_bound_split(struct maple_tree *mt)
+static noinline void __init check_lower_bound_split(struct maple_tree *mt)
{
MT_BUG_ON(mt, !mtree_empty(mt));
check_lb_not_empty(mt);
}
-static noinline void check_upper_bound_split(struct maple_tree *mt)
+static noinline void __init check_upper_bound_split(struct maple_tree *mt)
{
unsigned long i, j;
unsigned long huge;
@@ -306,7 +327,7 @@ static noinline void check_upper_bound_split(struct maple_tree *mt)
mtree_destroy(mt);
}
-static noinline void check_mid_split(struct maple_tree *mt)
+static noinline void __init check_mid_split(struct maple_tree *mt)
{
unsigned long huge = 8000UL * 1000 * 1000;
@@ -315,7 +336,7 @@ static noinline void check_mid_split(struct maple_tree *mt)
check_lb_not_empty(mt);
}
-static noinline void check_rev_find(struct maple_tree *mt)
+static noinline void __init check_rev_find(struct maple_tree *mt)
{
int i, nr_entries = 200;
void *val;
@@ -354,7 +375,7 @@ static noinline void check_rev_find(struct maple_tree *mt)
rcu_read_unlock();
}
-static noinline void check_find(struct maple_tree *mt)
+static noinline void __init check_find(struct maple_tree *mt)
{
unsigned long val = 0;
unsigned long count;
@@ -571,7 +592,7 @@ static noinline void check_find(struct maple_tree *mt)
mtree_destroy(mt);
}
-static noinline void check_find_2(struct maple_tree *mt)
+static noinline void __init check_find_2(struct maple_tree *mt)
{
unsigned long i, j;
void *entry;
@@ -616,7 +637,7 @@ static noinline void check_find_2(struct maple_tree *mt)
#if defined(CONFIG_64BIT)
-static noinline void check_alloc_rev_range(struct maple_tree *mt)
+static noinline void __init check_alloc_rev_range(struct maple_tree *mt)
{
/*
* Generated by:
@@ -624,7 +645,7 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
* awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
*/
- unsigned long range[] = {
+ static const unsigned long range[] = {
/* Inclusive , Exclusive. */
0x565234af2000, 0x565234af4000,
0x565234af4000, 0x565234af9000,
@@ -652,7 +673,7 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
0x7fff58791000, 0x7fff58793000,
};
- unsigned long holes[] = {
+ static const unsigned long holes[] = {
/*
* Note: start of hole is INCLUSIVE
* end of hole is EXCLUSIVE
@@ -672,7 +693,7 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
* 4. number that should be returned.
* 5. return value
*/
- unsigned long req_range[] = {
+ static const unsigned long req_range[] = {
0x565234af9000, /* Min */
0x7fff58791000, /* Max */
0x1000, /* Size */
@@ -680,7 +701,7 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
0, /* Return value success. */
0x0, /* Min */
- 0x565234AF1 << 12, /* Max */
+ 0x565234AF0 << 12, /* Max */
0x3000, /* Size */
0x565234AEE << 12, /* max - 3. */
0, /* Return value success. */
@@ -692,14 +713,14 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
0, /* Return value success. */
0x0, /* Min */
- 0x7F36D510A << 12, /* Max */
+ 0x7F36D5109 << 12, /* Max */
0x4000, /* Size */
0x7F36D5106 << 12, /* First rev hole of size 0x4000 */
0, /* Return value success. */
/* Ascend test. */
0x0,
- 34148798629 << 12,
+ 34148798628 << 12,
19 << 12,
34148797418 << 12,
0x0,
@@ -711,6 +732,12 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
0x0,
-EBUSY,
+ /* Single space test. */
+ 34148798725 << 12,
+ 34148798725 << 12,
+ 1 << 12,
+ 34148798725 << 12,
+ 0,
};
int i, range_count = ARRAY_SIZE(range);
@@ -759,9 +786,9 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
mas_unlock(&mas);
for (i = 0; i < req_range_count; i += 5) {
#if DEBUG_REV_RANGE
- pr_debug("\tReverse request between %lu-%lu size %lu, should get %lu\n",
- req_range[i] >> 12,
- (req_range[i + 1] >> 12) - 1,
+ pr_debug("\tReverse request %d between %lu-%lu size %lu, should get %lu\n",
+ i, req_range[i] >> 12,
+ (req_range[i + 1] >> 12),
req_range[i+2] >> 12,
req_range[i+3] >> 12);
#endif
@@ -777,13 +804,14 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt)
mt_set_non_kernel(1);
mtree_erase(mt, 34148798727); /* create a deleted range. */
+ mtree_erase(mt, 34148798725);
check_mtree_alloc_rrange(mt, 0, 34359052173, 210253414,
34148798725, 0, mt);
mtree_destroy(mt);
}
-static noinline void check_alloc_range(struct maple_tree *mt)
+static noinline void __init check_alloc_range(struct maple_tree *mt)
{
/*
* Generated by:
@@ -791,7 +819,7 @@ static noinline void check_alloc_range(struct maple_tree *mt)
* awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
*/
- unsigned long range[] = {
+ static const unsigned long range[] = {
/* Inclusive , Exclusive. */
0x565234af2000, 0x565234af4000,
0x565234af4000, 0x565234af9000,
@@ -818,7 +846,7 @@ static noinline void check_alloc_range(struct maple_tree *mt)
0x7fff5878e000, 0x7fff58791000,
0x7fff58791000, 0x7fff58793000,
};
- unsigned long holes[] = {
+ static const unsigned long holes[] = {
/* Start of hole, end of hole, size of hole (+1) */
0x565234afb000, 0x565234afc000, 0x1000,
0x565234afe000, 0x565235def000, 0x12F1000,
@@ -833,7 +861,7 @@ static noinline void check_alloc_range(struct maple_tree *mt)
* 4. number that should be returned.
* 5. return value
*/
- unsigned long req_range[] = {
+ static const unsigned long req_range[] = {
0x565234af9000, /* Min */
0x7fff58791000, /* Max */
0x1000, /* Size */
@@ -880,6 +908,13 @@ static noinline void check_alloc_range(struct maple_tree *mt)
4503599618982063UL << 12, /* Size */
34359052178 << 12, /* Expected location */
-EBUSY, /* Return failure. */
+
+ /* Test a single entry */
+ 34148798648 << 12, /* Min */
+ 34148798648 << 12, /* Max */
+ 4096, /* Size of 1 */
+ 34148798648 << 12, /* Location is the same as min/max */
+ 0, /* Success */
};
int i, range_count = ARRAY_SIZE(range);
int req_range_count = ARRAY_SIZE(req_range);
@@ -893,7 +928,7 @@ static noinline void check_alloc_range(struct maple_tree *mt)
#if DEBUG_ALLOC_RANGE
pr_debug("\tInsert %lu-%lu\n", range[i] >> 12,
(range[i + 1] >> 12) - 1);
- mt_dump(mt);
+ mt_dump(mt, mt_dump_hex);
#endif
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
xa_mk_value(range[i] >> 12), 0);
@@ -934,7 +969,7 @@ static noinline void check_alloc_range(struct maple_tree *mt)
xa_mk_value(req_range[i] >> 12)); /* pointer */
mt_validate(mt);
#if DEBUG_ALLOC_RANGE
- mt_dump(mt);
+ mt_dump(mt, mt_dump_hex);
#endif
}
@@ -942,10 +977,10 @@ static noinline void check_alloc_range(struct maple_tree *mt)
}
#endif
-static noinline void check_ranges(struct maple_tree *mt)
+static noinline void __init check_ranges(struct maple_tree *mt)
{
int i, val, val2;
- unsigned long r[] = {
+ static const unsigned long r[] = {
10, 15,
20, 25,
17, 22, /* Overlaps previous range. */
@@ -1210,7 +1245,7 @@ static noinline void check_ranges(struct maple_tree *mt)
MT_BUG_ON(mt, mt_height(mt) != 4);
}
-static noinline void check_next_entry(struct maple_tree *mt)
+static noinline void __init check_next_entry(struct maple_tree *mt)
{
void *entry = NULL;
unsigned long limit = 30, i = 0;
@@ -1234,7 +1269,7 @@ static noinline void check_next_entry(struct maple_tree *mt)
mtree_destroy(mt);
}
-static noinline void check_prev_entry(struct maple_tree *mt)
+static noinline void __init check_prev_entry(struct maple_tree *mt)
{
unsigned long index = 16;
void *value;
@@ -1278,7 +1313,7 @@ static noinline void check_prev_entry(struct maple_tree *mt)
mas_unlock(&mas);
}
-static noinline void check_root_expand(struct maple_tree *mt)
+static noinline void __init check_root_expand(struct maple_tree *mt)
{
MA_STATE(mas, mt, 0, 0);
void *ptr;
@@ -1287,6 +1322,7 @@ static noinline void check_root_expand(struct maple_tree *mt)
mas_lock(&mas);
mas_set(&mas, 3);
ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, ptr != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
@@ -1356,7 +1392,7 @@ static noinline void check_root_expand(struct maple_tree *mt)
mas_store_gfp(&mas, ptr, GFP_KERNEL);
ptr = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, ptr != NULL);
- MT_BUG_ON(mt, (mas.index != 1) && (mas.last != ULONG_MAX));
+ MT_BUG_ON(mt, (mas.index != ULONG_MAX) && (mas.last != ULONG_MAX));
mas_set(&mas, 1);
ptr = mas_prev(&mas, 0);
@@ -1367,13 +1403,13 @@ static noinline void check_root_expand(struct maple_tree *mt)
mas_unlock(&mas);
}
-static noinline void check_gap_combining(struct maple_tree *mt)
+static noinline void __init check_gap_combining(struct maple_tree *mt)
{
struct maple_enode *mn1, *mn2;
void *entry;
unsigned long singletons = 100;
- unsigned long *seq100;
- unsigned long seq100_64[] = {
+ static const unsigned long *seq100;
+ static const unsigned long seq100_64[] = {
/* 0-5 */
74, 75, 76,
50, 100, 2,
@@ -1387,7 +1423,7 @@ static noinline void check_gap_combining(struct maple_tree *mt)
76, 2, 79, 85, 4,
};
- unsigned long seq100_32[] = {
+ static const unsigned long seq100_32[] = {
/* 0-5 */
61, 62, 63,
50, 100, 2,
@@ -1401,11 +1437,11 @@ static noinline void check_gap_combining(struct maple_tree *mt)
76, 2, 79, 85, 4,
};
- unsigned long seq2000[] = {
+ static const unsigned long seq2000[] = {
1152, 1151,
1100, 1200, 2,
};
- unsigned long seq400[] = {
+ static const unsigned long seq400[] = {
286, 318,
256, 260, 266, 270, 275, 280, 290, 398,
286, 310,
@@ -1564,7 +1600,7 @@ static noinline void check_gap_combining(struct maple_tree *mt)
mt_set_non_kernel(0);
mtree_destroy(mt);
}
-static noinline void check_node_overwrite(struct maple_tree *mt)
+static noinline void __init check_node_overwrite(struct maple_tree *mt)
{
int i, max = 4000;
@@ -1572,12 +1608,12 @@ static noinline void check_node_overwrite(struct maple_tree *mt)
mtree_test_store_range(mt, i*100, i*100 + 50, xa_mk_value(i*100));
mtree_test_store_range(mt, 319951, 367950, NULL);
- /*mt_dump(mt); */
+ /*mt_dump(mt, mt_dump_dec); */
mt_validate(mt);
}
#if defined(BENCH_SLOT_STORE)
-static noinline void bench_slot_store(struct maple_tree *mt)
+static noinline void __init bench_slot_store(struct maple_tree *mt)
{
int i, brk = 105, max = 1040, brk_start = 100, count = 20000000;
@@ -1593,7 +1629,7 @@ static noinline void bench_slot_store(struct maple_tree *mt)
#endif
#if defined(BENCH_NODE_STORE)
-static noinline void bench_node_store(struct maple_tree *mt)
+static noinline void __init bench_node_store(struct maple_tree *mt)
{
int i, overwrite = 76, max = 240, count = 20000000;
@@ -1612,7 +1648,7 @@ static noinline void bench_node_store(struct maple_tree *mt)
#endif
#if defined(BENCH_AWALK)
-static noinline void bench_awalk(struct maple_tree *mt)
+static noinline void __init bench_awalk(struct maple_tree *mt)
{
int i, max = 2500, count = 50000000;
MA_STATE(mas, mt, 1470, 1470);
@@ -1629,7 +1665,7 @@ static noinline void bench_awalk(struct maple_tree *mt)
}
#endif
#if defined(BENCH_WALK)
-static noinline void bench_walk(struct maple_tree *mt)
+static noinline void __init bench_walk(struct maple_tree *mt)
{
int i, max = 2500, count = 550000000;
MA_STATE(mas, mt, 1470, 1470);
@@ -1646,7 +1682,7 @@ static noinline void bench_walk(struct maple_tree *mt)
#endif
#if defined(BENCH_MT_FOR_EACH)
-static noinline void bench_mt_for_each(struct maple_tree *mt)
+static noinline void __init bench_mt_for_each(struct maple_tree *mt)
{
int i, count = 1000000;
unsigned long max = 2500, index = 0;
@@ -1670,7 +1706,7 @@ static noinline void bench_mt_for_each(struct maple_tree *mt)
#endif
/* check_forking - simulate the kernel forking sequence with the tree. */
-static noinline void check_forking(struct maple_tree *mt)
+static noinline void __init check_forking(struct maple_tree *mt)
{
struct maple_tree newmt;
@@ -1709,7 +1745,7 @@ static noinline void check_forking(struct maple_tree *mt)
mtree_destroy(&newmt);
}
-static noinline void check_iteration(struct maple_tree *mt)
+static noinline void __init check_iteration(struct maple_tree *mt)
{
int i, nr_entries = 125;
void *val;
@@ -1765,7 +1801,6 @@ static noinline void check_iteration(struct maple_tree *mt)
mas.index = 760;
mas.last = 765;
mas_store(&mas, val);
- mas_next(&mas, ULONG_MAX);
}
i++;
}
@@ -1777,7 +1812,7 @@ static noinline void check_iteration(struct maple_tree *mt)
mt_set_non_kernel(0);
}
-static noinline void check_mas_store_gfp(struct maple_tree *mt)
+static noinline void __init check_mas_store_gfp(struct maple_tree *mt)
{
struct maple_tree newmt;
@@ -1810,7 +1845,7 @@ static noinline void check_mas_store_gfp(struct maple_tree *mt)
}
#if defined(BENCH_FORK)
-static noinline void bench_forking(struct maple_tree *mt)
+static noinline void __init bench_forking(struct maple_tree *mt)
{
struct maple_tree newmt;
@@ -1852,15 +1887,17 @@ static noinline void bench_forking(struct maple_tree *mt)
}
#endif
-static noinline void next_prev_test(struct maple_tree *mt)
+static noinline void __init next_prev_test(struct maple_tree *mt)
{
int i, nr_entries;
void *val;
MA_STATE(mas, mt, 0, 0);
struct maple_enode *mn;
- unsigned long *level2;
- unsigned long level2_64[] = {707, 1000, 710, 715, 720, 725};
- unsigned long level2_32[] = {1747, 2000, 1750, 1755, 1760, 1765};
+ static const unsigned long *level2;
+ static const unsigned long level2_64[] = { 707, 1000, 710, 715, 720,
+ 725};
+ static const unsigned long level2_32[] = { 1747, 2000, 1750, 1755,
+ 1760, 1765};
if (MAPLE_32BIT) {
nr_entries = 500;
@@ -1974,7 +2011,7 @@ static noinline void next_prev_test(struct maple_tree *mt)
val = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, val != NULL);
- MT_BUG_ON(mt, mas.index != ULONG_MAX);
+ MT_BUG_ON(mt, mas.index != 0x7d6);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
val = mas_prev(&mas, 0);
@@ -1998,7 +2035,8 @@ static noinline void next_prev_test(struct maple_tree *mt)
val = mas_prev(&mas, 0);
MT_BUG_ON(mt, val != NULL);
MT_BUG_ON(mt, mas.index != 0);
- MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.last != 5);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
mas.index = 0;
mas.last = 5;
@@ -2010,7 +2048,7 @@ static noinline void next_prev_test(struct maple_tree *mt)
val = mas_prev(&mas, 0);
MT_BUG_ON(mt, val != NULL);
MT_BUG_ON(mt, mas.index != 0);
- MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.last != 9);
mas_unlock(&mas);
mtree_destroy(mt);
@@ -2028,7 +2066,7 @@ static noinline void next_prev_test(struct maple_tree *mt)
/* Test spanning writes that require balancing right sibling or right cousin */
-static noinline void check_spanning_relatives(struct maple_tree *mt)
+static noinline void __init check_spanning_relatives(struct maple_tree *mt)
{
unsigned long i, nr_entries = 1000;
@@ -2041,7 +2079,7 @@ static noinline void check_spanning_relatives(struct maple_tree *mt)
mtree_store_range(mt, 9365, 9955, NULL, GFP_KERNEL);
}
-static noinline void check_fuzzer(struct maple_tree *mt)
+static noinline void __init check_fuzzer(struct maple_tree *mt)
{
/*
* 1. Causes a spanning rebalance of a single root node.
@@ -2438,7 +2476,7 @@ static noinline void check_fuzzer(struct maple_tree *mt)
}
/* duplicate the tree with a specific gap */
-static noinline void check_dup_gaps(struct maple_tree *mt,
+static noinline void __init check_dup_gaps(struct maple_tree *mt,
unsigned long nr_entries, bool zero_start,
unsigned long gap)
{
@@ -2478,7 +2516,7 @@ static noinline void check_dup_gaps(struct maple_tree *mt,
}
/* Duplicate many sizes of trees. Mainly to test expected entry values */
-static noinline void check_dup(struct maple_tree *mt)
+static noinline void __init check_dup(struct maple_tree *mt)
{
int i;
int big_start = 100010;
@@ -2566,7 +2604,7 @@ static noinline void check_dup(struct maple_tree *mt)
}
}
-static noinline void check_bnode_min_spanning(struct maple_tree *mt)
+static noinline void __init check_bnode_min_spanning(struct maple_tree *mt)
{
int i = 50;
MA_STATE(mas, mt, 0, 0);
@@ -2585,7 +2623,7 @@ static noinline void check_bnode_min_spanning(struct maple_tree *mt)
mt_set_non_kernel(0);
}
-static noinline void check_empty_area_window(struct maple_tree *mt)
+static noinline void __init check_empty_area_window(struct maple_tree *mt)
{
unsigned long i, nr_entries = 20;
MA_STATE(mas, mt, 0, 0);
@@ -2660,7 +2698,7 @@ static noinline void check_empty_area_window(struct maple_tree *mt)
MT_BUG_ON(mt, mas_empty_area(&mas, 5, 100, 6) != -EBUSY);
mas_reset(&mas);
- MT_BUG_ON(mt, mas_empty_area(&mas, 0, 8, 10) != -EBUSY);
+ MT_BUG_ON(mt, mas_empty_area(&mas, 0, 8, 10) != -EINVAL);
mas_reset(&mas);
mas_empty_area(&mas, 100, 165, 3);
@@ -2670,7 +2708,7 @@ static noinline void check_empty_area_window(struct maple_tree *mt)
rcu_read_unlock();
}
-static noinline void check_empty_area_fill(struct maple_tree *mt)
+static noinline void __init check_empty_area_fill(struct maple_tree *mt)
{
const unsigned long max = 0x25D78000;
unsigned long size;
@@ -2713,12 +2751,635 @@ static noinline void check_empty_area_fill(struct maple_tree *mt)
mt_set_non_kernel(0);
}
+/*
+ * Check MAS_START, MAS_PAUSE, active (implied), and MAS_NONE transitions.
+ *
+ * The table below shows the single entry tree (0-0 pointer) and normal tree
+ * with nodes.
+ *
+ * Function ENTRY Start Result index & last
+ * ┬ ┬ ┬ ┬ ┬
+ * │ │ │ │ └─ the final range
+ * │ │ │ └─ The node value after execution
+ * │ │ └─ The node value before execution
+ * │ └─ If the entry exists or does not exist (DNE)
+ * └─ The function name
+ *
+ * Function ENTRY Start Result index & last
+ * mas_next()
+ * - after last
+ * Single entry tree at 0-0
+ * ------------------------
+ * DNE MAS_START MAS_NONE 1 - oo
+ * DNE MAS_PAUSE MAS_NONE 1 - oo
+ * DNE MAS_ROOT MAS_NONE 1 - oo
+ * when index = 0
+ * DNE MAS_NONE MAS_ROOT 0
+ * when index > 0
+ * DNE MAS_NONE MAS_NONE 1 - oo
+ *
+ * Normal tree
+ * -----------
+ * exists MAS_START active range
+ * DNE MAS_START active set to last range
+ * exists MAS_PAUSE active range
+ * DNE MAS_PAUSE active set to last range
+ * exists MAS_NONE active range
+ * exists active active range
+ * DNE active active set to last range
+ *
+ * Function ENTRY Start Result index & last
+ * mas_prev()
+ * - before index
+ * Single entry tree at 0-0
+ * ------------------------
+ * if index > 0
+ * exists MAS_START MAS_ROOT 0
+ * exists MAS_PAUSE MAS_ROOT 0
+ * exists MAS_NONE MAS_ROOT 0
+ *
+ * if index == 0
+ * DNE MAS_START MAS_NONE 0
+ * DNE MAS_PAUSE MAS_NONE 0
+ * DNE MAS_NONE MAS_NONE 0
+ * DNE MAS_ROOT MAS_NONE 0
+ *
+ * Normal tree
+ * -----------
+ * exists MAS_START active range
+ * DNE MAS_START active set to min
+ * exists MAS_PAUSE active range
+ * DNE MAS_PAUSE active set to min
+ * exists MAS_NONE active range
+ * DNE MAS_NONE MAS_NONE set to min
+ * any MAS_ROOT MAS_NONE 0
+ * exists active active range
+ * DNE active active last range
+ *
+ * Function ENTRY Start Result index & last
+ * mas_find()
+ * - at index or next
+ * Single entry tree at 0-0
+ * ------------------------
+ * if index > 0
+ * DNE MAS_START MAS_NONE 0
+ * DNE MAS_PAUSE MAS_NONE 0
+ * DNE MAS_ROOT MAS_NONE 0
+ * DNE MAS_NONE MAS_NONE 0
+ * if index == 0
+ * exists MAS_START MAS_ROOT 0
+ * exists MAS_PAUSE MAS_ROOT 0
+ * exists MAS_NONE MAS_ROOT 0
+ *
+ * Normal tree
+ * -----------
+ * exists MAS_START active range
+ * DNE MAS_START active set to max
+ * exists MAS_PAUSE active range
+ * DNE MAS_PAUSE active set to max
+ * exists MAS_NONE active range
+ * exists active active range
+ * DNE active active last range (max < last)
+ *
+ * Function ENTRY Start Result index & last
+ * mas_find_rev()
+ * - at index or before
+ * Single entry tree at 0-0
+ * ------------------------
+ * if index > 0
+ * exists MAS_START MAS_ROOT 0
+ * exists MAS_PAUSE MAS_ROOT 0
+ * exists MAS_NONE MAS_ROOT 0
+ * if index == 0
+ * DNE MAS_START MAS_NONE 0
+ * DNE MAS_PAUSE MAS_NONE 0
+ * DNE MAS_NONE MAS_NONE 0
+ * DNE MAS_ROOT MAS_NONE 0
+ *
+ * Normal tree
+ * -----------
+ * exists MAS_START active range
+ * DNE MAS_START active set to min
+ * exists MAS_PAUSE active range
+ * DNE MAS_PAUSE active set to min
+ * exists MAS_NONE active range
+ * exists active active range
+ * DNE active active last range (min > index)
+ *
+ * Function ENTRY Start Result index & last
+ * mas_walk()
+ * - Look up index
+ * Single entry tree at 0-0
+ * ------------------------
+ * if index > 0
+ * DNE MAS_START MAS_ROOT 1 - oo
+ * DNE MAS_PAUSE MAS_ROOT 1 - oo
+ * DNE MAS_NONE MAS_ROOT 1 - oo
+ * DNE MAS_ROOT MAS_ROOT 1 - oo
+ * if index == 0
+ * exists MAS_START MAS_ROOT 0
+ * exists MAS_PAUSE MAS_ROOT 0
+ * exists MAS_NONE MAS_ROOT 0
+ * exists MAS_ROOT MAS_ROOT 0
+ *
+ * Normal tree
+ * -----------
+ * exists MAS_START active range
+ * DNE MAS_START active range of NULL
+ * exists MAS_PAUSE active range
+ * DNE MAS_PAUSE active range of NULL
+ * exists MAS_NONE active range
+ * DNE MAS_NONE active range of NULL
+ * exists active active range
+ * DNE active active range of NULL
+ */
+
+#define mas_active(x) (((x).node != MAS_ROOT) && \
+ ((x).node != MAS_START) && \
+ ((x).node != MAS_PAUSE) && \
+ ((x).node != MAS_NONE))
+static noinline void __init check_state_handling(struct maple_tree *mt)
+{
+ MA_STATE(mas, mt, 0, 0);
+ void *entry, *ptr = (void *) 0x1234500;
+ void *ptr2 = &ptr;
+ void *ptr3 = &ptr2;
+
+ /* Check MAS_ROOT First */
+ mtree_store_range(mt, 0, 0, ptr, GFP_KERNEL);
+
+ mas_lock(&mas);
+ /* prev: Start -> none */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* prev: Start -> root */
+ mas_set(&mas, 10);
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* prev: pause -> root */
+ mas_set(&mas, 10);
+ mas_pause(&mas);
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* next: start -> none */
+ mas_set(&mas, 0);
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* next: start -> none */
+ mas_set(&mas, 10);
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* find: start -> root */
+ mas_set(&mas, 0);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* find: root -> none */
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* find: none -> none */
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* find: start -> none */
+ mas_set(&mas, 10);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* find_rev: none -> root */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* find_rev: start -> root */
+ mas_set(&mas, 0);
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* find_rev: root -> none */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* find_rev: none -> none */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* find_rev: start -> root */
+ mas_set(&mas, 10);
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* walk: start -> none */
+ mas_set(&mas, 10);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* walk: pause -> none*/
+ mas_set(&mas, 10);
+ mas_pause(&mas);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* walk: none -> none */
+ mas.index = mas.last = 10;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* walk: none -> none */
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* walk: start -> root */
+ mas_set(&mas, 0);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* walk: pause -> root */
+ mas_set(&mas, 0);
+ mas_pause(&mas);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* walk: none -> root */
+ mas.node = MAS_NONE;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* walk: root -> root */
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ /* walk: root -> none */
+ mas_set(&mas, 10);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_NONE);
+
+ /* walk: none -> root */
+ mas.index = mas.last = 0;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+ MT_BUG_ON(mt, mas.node != MAS_ROOT);
+
+ mas_unlock(&mas);
+
+ /* Check when there is an actual node */
+ mtree_store_range(mt, 0, 0, NULL, GFP_KERNEL);
+ mtree_store_range(mt, 0x1000, 0x1500, ptr, GFP_KERNEL);
+ mtree_store_range(mt, 0x2000, 0x2500, ptr2, GFP_KERNEL);
+ mtree_store_range(mt, 0x3000, 0x3500, ptr3, GFP_KERNEL);
+
+ mas_lock(&mas);
+
+ /* next: start ->active */
+ mas_set(&mas, 0);
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* next: pause ->active */
+ mas_set(&mas, 0);
+ mas_pause(&mas);
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* next: none ->active */
+ mas.index = mas.last = 0;
+ mas.offset = 0;
+ mas.node = MAS_NONE;
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* next:active ->active */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr2);
+ MT_BUG_ON(mt, mas.index != 0x2000);
+ MT_BUG_ON(mt, mas.last != 0x2500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* next:active -> active out of range*/
+ entry = mas_next(&mas, 0x2999);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x2501);
+ MT_BUG_ON(mt, mas.last != 0x2fff);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* Continue after out of range*/
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr3);
+ MT_BUG_ON(mt, mas.index != 0x3000);
+ MT_BUG_ON(mt, mas.last != 0x3500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* next:active -> active out of range*/
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x3501);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* next: none -> active, skip value at location */
+ mas_set(&mas, 0);
+ entry = mas_next(&mas, ULONG_MAX);
+ mas.node = MAS_NONE;
+ mas.offset = 0;
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr2);
+ MT_BUG_ON(mt, mas.index != 0x2000);
+ MT_BUG_ON(mt, mas.last != 0x2500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* prev:active ->active */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* prev:active -> active out of range*/
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0x0FFF);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* prev: pause ->active */
+ mas_set(&mas, 0x3600);
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr3);
+ mas_pause(&mas);
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr2);
+ MT_BUG_ON(mt, mas.index != 0x2000);
+ MT_BUG_ON(mt, mas.last != 0x2500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* prev:active -> active out of range*/
+ entry = mas_prev(&mas, 0x1600);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1501);
+ MT_BUG_ON(mt, mas.last != 0x1FFF);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* prev: active ->active, continue*/
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find: start ->active */
+ mas_set(&mas, 0);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find: pause ->active */
+ mas_set(&mas, 0);
+ mas_pause(&mas);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find: start -> active on value */
+ mas_set(&mas, 1200);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find:active ->active */
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr2);
+ MT_BUG_ON(mt, mas.index != 0x2000);
+ MT_BUG_ON(mt, mas.last != 0x2500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+
+ /* find:active -> active (NULL)*/
+ entry = mas_find(&mas, 0x2700);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x2501);
+ MT_BUG_ON(mt, mas.last != 0x2FFF);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find: none ->active */
+ entry = mas_find(&mas, 0x5000);
+ MT_BUG_ON(mt, entry != ptr3);
+ MT_BUG_ON(mt, mas.index != 0x3000);
+ MT_BUG_ON(mt, mas.last != 0x3500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find:active -> active (NULL) end*/
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x3501);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find_rev: active (END) ->active */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr3);
+ MT_BUG_ON(mt, mas.index != 0x3000);
+ MT_BUG_ON(mt, mas.last != 0x3500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find_rev:active ->active */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr2);
+ MT_BUG_ON(mt, mas.index != 0x2000);
+ MT_BUG_ON(mt, mas.last != 0x2500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find_rev: pause ->active */
+ mas_pause(&mas);
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find_rev:active -> active */
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0x0FFF);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* find_rev: start ->active */
+ mas_set(&mas, 0x1200);
+ entry = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* mas_walk start ->active */
+ mas_set(&mas, 0x1200);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* mas_walk start ->active */
+ mas_set(&mas, 0x1600);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1501);
+ MT_BUG_ON(mt, mas.last != 0x1fff);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* mas_walk pause ->active */
+ mas_set(&mas, 0x1200);
+ mas_pause(&mas);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* mas_walk pause -> active */
+ mas_set(&mas, 0x1600);
+ mas_pause(&mas);
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1501);
+ MT_BUG_ON(mt, mas.last != 0x1fff);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* mas_walk none -> active */
+ mas_set(&mas, 0x1200);
+ mas.node = MAS_NONE;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* mas_walk none -> active */
+ mas_set(&mas, 0x1600);
+ mas.node = MAS_NONE;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1501);
+ MT_BUG_ON(mt, mas.last != 0x1fff);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* mas_walk active -> active */
+ mas.index = 0x1200;
+ mas.last = 0x1200;
+ mas.offset = 0;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* mas_walk active -> active */
+ mas.index = 0x1600;
+ mas.last = 0x1600;
+ entry = mas_walk(&mas);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1501);
+ MT_BUG_ON(mt, mas.last != 0x1fff);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ mas_unlock(&mas);
+}
+
static DEFINE_MTREE(tree);
-static int maple_tree_seed(void)
+static int __init maple_tree_seed(void)
{
- unsigned long set[] = {5015, 5014, 5017, 25, 1000,
- 1001, 1002, 1003, 1005, 0,
- 5003, 5002};
+ unsigned long set[] = { 5015, 5014, 5017, 25, 1000,
+ 1001, 1002, 1003, 1005, 0,
+ 5003, 5002};
void *ptr = &set;
pr_info("\nTEST STARTING\n\n");
@@ -2974,6 +3635,10 @@ static int maple_tree_seed(void)
mtree_destroy(&tree);
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_state_handling(&tree);
+ mtree_destroy(&tree);
+
#if defined(BENCH)
skip:
#endif
@@ -2988,7 +3653,7 @@ skip:
return -EINVAL;
}
-static void maple_tree_harvest(void)
+static void __exit maple_tree_harvest(void)
{
}
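For readers following the new check_state_handling() test, here is a minimal usage sketch (not part of the patch) of the state machine it verifies: mas_find()/mas_for_each() leave the maple state active between calls, and mas_pause() lets a caller drop the lock and resume safely. The resched pattern below is illustrative, not taken from this diff.

	MA_STATE(mas, &tree, 0, 0);
	void *entry;

	rcu_read_lock();
	mas_for_each(&mas, entry, ULONG_MAX) {
		/* process entry; mas.index/mas.last hold its range */
		if (need_resched()) {
			mas_pause(&mas);	/* active -> MAS_PAUSE */
			rcu_read_unlock();
			cond_resched();
			rcu_read_lock();	/* next call re-walks from the saved range */
		}
	}
	rcu_read_unlock();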
diff --git a/mm/compaction.c b/mm/compaction.c
index c8bcdea15f5f..5584fa5fa3d4 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1736,6 +1736,7 @@ static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNE
*/
static unsigned int __read_mostly sysctl_compaction_proactiveness = 20;
static int sysctl_extfrag_threshold = 500;
+static int __read_mostly sysctl_compact_memory;
static inline void
update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
@@ -2780,6 +2781,15 @@ static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int
static int sysctl_compaction_handler(struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
+ int ret;
+
+ ret = proc_dointvec(table, write, buffer, length, ppos);
+ if (ret)
+ return ret;
+
+ if (sysctl_compact_memory != 1)
+ return -EINVAL;
+
if (write)
compact_nodes();
@@ -3095,7 +3105,7 @@ static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table,
static struct ctl_table vm_compaction[] = {
{
.procname = "compact_memory",
- .data = NULL,
+ .data = &sysctl_compact_memory,
.maxlen = sizeof(int),
.mode = 0200,
.proc_handler = sysctl_compaction_handler,
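A user-visible consequence of the handler change above: /proc/sys/vm/compact_memory now accepts only the value 1, and any other write fails with EINVAL instead of silently triggering compaction. A minimal user-space sketch:

	#include <fcntl.h>
	#include <unistd.h>

	/* Trigger global compaction; needs root (the file is mode 0200). */
	int trigger_compaction(void)
	{
		int fd = open("/proc/sys/vm/compact_memory", O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, "1", 1);	/* writing e.g. "0" now fails with EINVAL */
		close(fd);
		return n == 1 ? 0 : -1;
	}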
diff --git a/mm/debug.c b/mm/debug.c
index c7b228097bd9..ee533a5ceb79 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -268,4 +268,13 @@ void page_init_poison(struct page *page, size_t size)
if (page_init_poisoning)
memset(page, PAGE_POISON_PATTERN, size);
}
+
+void vma_iter_dump_tree(const struct vma_iterator *vmi)
+{
+#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
+ mas_dump(&vmi->mas);
+ mt_dump(vmi->mas.tree, mt_dump_hex);
+#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
+}
+
#endif /* CONFIG_DEBUG_VM */
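vma_iter_dump_tree() bundles the iterator dump and the full-tree hex dump behind a single call so VMA-layer checks need not open-code both. A hedged sketch of an assumed call site, reusing the out-of-sync predicate from the mm/internal.h hunk later in this series:

	/* Hypothetical debugging check; vmi and vma come from the caller. */
	if (vmi->mas.node != MAS_START && vmi->mas.index > vma->vm_start) {
		pr_warn("vma iterator out of sync with tree\n");
		vma_iter_dump_tree(vmi);
	}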
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index c54177aabebd..b1f85c49ab06 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -109,10 +109,10 @@ static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
WARN_ON(!pte_same(pte, pte));
WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
- WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
+ WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte), args->vma)));
WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
- WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
+ WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte, args->vma))));
WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}
@@ -153,7 +153,7 @@ static void __init pte_advanced_tests(struct pgtable_debug_args *args)
pte = pte_mkclean(pte);
set_pte_at(args->mm, args->vaddr, args->ptep, pte);
flush_dcache_page(page);
- pte = pte_mkwrite(pte);
+ pte = pte_mkwrite(pte, args->vma);
pte = pte_mkdirty(pte);
ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
pte = ptep_get(args->ptep);
@@ -199,10 +199,10 @@ static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
WARN_ON(!pmd_same(pmd, pmd));
WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
- WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
+ WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd), args->vma)));
WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
- WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
+ WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd, args->vma))));
WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
/*
@@ -253,7 +253,7 @@ static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
pmd = pmd_mkclean(pmd);
set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
flush_dcache_page(page);
- pmd = pmd_mkwrite(pmd);
+ pmd = pmd_mkwrite(pmd, args->vma);
pmd = pmd_mkdirty(pmd);
pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
pmd = READ_ONCE(*args->pmdp);
@@ -928,8 +928,8 @@ static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
pte = mk_huge_pte(page, args->page_prot);
WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
- WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
- WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));
+ WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte), args->vma)));
+ WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte, args->vma))));
#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);
diff --git a/mm/dmapool.c b/mm/dmapool.c
index d2b0f8fc9649..a151a21e571b 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -226,7 +226,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
{
struct dma_pool *retval;
size_t allocation;
- bool empty = false;
+ bool empty;
if (!dev)
return NULL;
@@ -276,8 +276,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
*/
mutex_lock(&pools_reg_lock);
mutex_lock(&pools_lock);
- if (list_empty(&dev->dma_pools))
- empty = true;
+ empty = list_empty(&dev->dma_pools);
list_add(&retval->pools, &dev->dma_pools);
mutex_unlock(&pools_lock);
if (empty) {
@@ -361,7 +360,7 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
void dma_pool_destroy(struct dma_pool *pool)
{
struct dma_page *page, *tmp;
- bool empty = false, busy = false;
+ bool empty, busy = false;
if (unlikely(!pool))
return;
@@ -369,8 +368,7 @@ void dma_pool_destroy(struct dma_pool *pool)
mutex_lock(&pools_reg_lock);
mutex_lock(&pools_lock);
list_del(&pool->pools);
- if (list_empty(&pool->dev->dma_pools))
- empty = true;
+ empty = list_empty(&pool->dev->dma_pools);
mutex_unlock(&pools_lock);
if (empty)
device_remove_file(pool->dev, &dev_attr_pools);
diff --git a/mm/filemap.c b/mm/filemap.c
index b4c9bd368b7e..2d3d70c64dfd 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -22,6 +22,7 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
@@ -58,6 +59,8 @@
#include <asm/mman.h>
+#include "swap.h"
+
/*
* Shared mappings implemented 30.11.1994. It's not fully working yet,
* though.
@@ -4119,3 +4122,171 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp)
return try_to_free_buffers(folio);
}
EXPORT_SYMBOL(filemap_release_folio);
+
+#ifdef CONFIG_CACHESTAT_SYSCALL
+/**
+ * filemap_cachestat() - compute the page cache statistics of a mapping
+ * @mapping: The mapping to compute the statistics for.
+ * @first_index: The starting page cache index.
+ * @last_index: The final page index (inclusive).
+ * @cs: the cachestat struct to write the result to.
+ *
+ * This will query the page cache statistics of a mapping in the
+ * page range of [first_index, last_index] (inclusive). The statistics
+ * queried include: number of dirty pages, number of pages marked for
+ * writeback, and the number of (recently) evicted pages.
+ */
+static void filemap_cachestat(struct address_space *mapping,
+ pgoff_t first_index, pgoff_t last_index, struct cachestat *cs)
+{
+ XA_STATE(xas, &mapping->i_pages, first_index);
+ struct folio *folio;
+
+ rcu_read_lock();
+ xas_for_each(&xas, folio, last_index) {
+ unsigned long nr_pages;
+ pgoff_t folio_first_index, folio_last_index;
+
+ if (xas_retry(&xas, folio))
+ continue;
+
+ if (xa_is_value(folio)) {
+ /* page is evicted */
+ void *shadow = (void *)folio;
+ bool workingset; /* not used */
+ int order = xa_get_order(xas.xa, xas.xa_index);
+
+ nr_pages = 1 << order;
+ folio_first_index = round_down(xas.xa_index, 1 << order);
+ folio_last_index = folio_first_index + nr_pages - 1;
+
+ /* Folios might straddle the range boundaries, only count covered pages */
+ if (folio_first_index < first_index)
+ nr_pages -= first_index - folio_first_index;
+
+ if (folio_last_index > last_index)
+ nr_pages -= folio_last_index - last_index;
+
+ cs->nr_evicted += nr_pages;
+
+#ifdef CONFIG_SWAP /* implies CONFIG_MMU */
+ if (shmem_mapping(mapping)) {
+ /* shmem file - in swap cache */
+ swp_entry_t swp = radix_to_swp_entry(folio);
+
+ shadow = get_shadow_from_swap_cache(swp);
+ }
+#endif
+ if (workingset_test_recent(shadow, true, &workingset))
+ cs->nr_recently_evicted += nr_pages;
+
+ goto resched;
+ }
+
+ nr_pages = folio_nr_pages(folio);
+ folio_first_index = folio_pgoff(folio);
+ folio_last_index = folio_first_index + nr_pages - 1;
+
+ /* Folios might straddle the range boundaries, only count covered pages */
+ if (folio_first_index < first_index)
+ nr_pages -= first_index - folio_first_index;
+
+ if (folio_last_index > last_index)
+ nr_pages -= folio_last_index - last_index;
+
+ /* page is in cache */
+ cs->nr_cache += nr_pages;
+
+ if (folio_test_dirty(folio))
+ cs->nr_dirty += nr_pages;
+
+ if (folio_test_writeback(folio))
+ cs->nr_writeback += nr_pages;
+
+resched:
+ if (need_resched()) {
+ xas_pause(&xas);
+ cond_resched_rcu();
+ }
+ }
+ rcu_read_unlock();
+}
+
+/*
+ * The cachestat(2) system call.
+ *
+ * cachestat() returns the page cache statistics of a file in the
+ * byte range specified by `off` and `len`: number of cached pages,
+ * number of dirty pages, number of pages marked for writeback,
+ * number of evicted pages, and number of recently evicted pages.
+ *
+ * An evicted page is a page that was previously in the page cache
+ * but has since been evicted. A page is recently evicted if its last
+ * eviction was recent enough that its reentry to the cache would
+ * indicate that it is actively being used by the system, and that
+ * there is memory pressure on the system.
+ *
+ * `off` and `len` must be non-negative integers. If `len` > 0,
+ * the queried range is [`off`, `off` + `len`]. If `len` == 0,
+ * we will query in the range from `off` to the end of the file.
+ *
+ * The `flags` argument is unused for now, but is included for future
+ * extensibility. Users should pass 0 (i.e., no flags specified).
+ *
+ * Currently, hugetlbfs is not supported.
+ *
+ * Because the status of a page can change after cachestat() checks it
+ * but before it returns to the application, the returned values may
+ * contain stale information.
+ *
+ * return values:
+ * zero - success
+ * -EFAULT - cstat or cstat_range points to an illegal address
+ * -EINVAL - invalid flags
+ * -EBADF - invalid file descriptor
+ * -EOPNOTSUPP - file descriptor is of a hugetlbfs file
+ */
+SYSCALL_DEFINE4(cachestat, unsigned int, fd,
+ struct cachestat_range __user *, cstat_range,
+ struct cachestat __user *, cstat, unsigned int, flags)
+{
+ struct fd f = fdget(fd);
+ struct address_space *mapping;
+ struct cachestat_range csr;
+ struct cachestat cs;
+ pgoff_t first_index, last_index;
+
+ if (!f.file)
+ return -EBADF;
+
+ if (copy_from_user(&csr, cstat_range,
+ sizeof(struct cachestat_range))) {
+ fdput(f);
+ return -EFAULT;
+ }
+
+ /* hugetlbfs is not supported */
+ if (is_file_hugepages(f.file)) {
+ fdput(f);
+ return -EOPNOTSUPP;
+ }
+
+ if (flags != 0) {
+ fdput(f);
+ return -EINVAL;
+ }
+
+ first_index = csr.off >> PAGE_SHIFT;
+ last_index =
+ csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;
+ memset(&cs, 0, sizeof(struct cachestat));
+ mapping = f.file->f_mapping;
+ filemap_cachestat(mapping, first_index, last_index, &cs);
+ fdput(f);
+
+ if (copy_to_user(cstat, &cs, sizeof(struct cachestat)))
+ return -EFAULT;
+
+ return 0;
+}
+#endif /* CONFIG_CACHESTAT_SYSCALL */
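Since there is no libc wrapper yet, a hedged user-space sketch of calling cachestat(2) follows; the __NR_cachestat number and the <linux/mman.h> home of the uapi structs are assumptions here, while the field names match the kernel code above:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/mman.h>		/* assumed uapi home of struct cachestat */

	int main(void)
	{
		struct cachestat_range range = { .off = 0, .len = 0 };	/* whole file */
		struct cachestat cs;
		int fd = open("/var/log/syslog", O_RDONLY);

		if (fd < 0 || syscall(__NR_cachestat, fd, &range, &cs, 0))
			return 1;
		printf("cached %llu dirty %llu writeback %llu evicted %llu recent %llu\n",
		       (unsigned long long)cs.nr_cache, (unsigned long long)cs.nr_dirty,
		       (unsigned long long)cs.nr_writeback, (unsigned long long)cs.nr_evicted,
		       (unsigned long long)cs.nr_recently_evicted);
		return 0;
	}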
diff --git a/mm/gup.c b/mm/gup.c
index bbe416236593..75f2d6c9a5d1 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -18,6 +18,7 @@
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
+#include <linux/shmem_fs.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
@@ -959,16 +960,54 @@ static int faultin_page(struct vm_area_struct *vma,
return 0;
}
+/*
+ * Writing to file-backed mappings which require folio dirty tracking using GUP
+ * is a fundamentally broken operation, as kernel write access to GUP mappings
+ * does not adhere to the semantics expected by a file system.
+ *
+ * Consider the following scenario:-
+ *
+ * 1. A folio is written to via GUP which write-faults the memory, notifying
+ * the file system and dirtying the folio.
+ * 2. Later, writeback is triggered, resulting in the folio being cleaned and
+ * the PTE being marked read-only.
+ * 3. The GUP caller writes to the folio, as it is mapped read/write via the
+ * direct mapping.
+ * 4. The GUP caller, now done with the page, unpins it and sets it dirty
+ * (though it does not have to).
+ *
+ * This results in both data being written to a folio without writenotify, and
+ * the folio being dirtied unexpectedly (if the caller decides to do so).
+ */
+static bool writable_file_mapping_allowed(struct vm_area_struct *vma,
+ unsigned long gup_flags)
+{
+ /*
+ * If we aren't pinning then no problematic write can occur. A long term
+ * pin is the most egregious case so this is the case we disallow.
+ */
+ if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) !=
+ (FOLL_PIN | FOLL_LONGTERM))
+ return true;
+
+ /*
+ * If the VMA does not require dirty tracking then no problematic write
+ * can occur either.
+ */
+ return !vma_needs_dirty_tracking(vma);
+}
+
static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
vm_flags_t vm_flags = vma->vm_flags;
int write = (gup_flags & FOLL_WRITE);
int foreign = (gup_flags & FOLL_REMOTE);
+ bool vma_anon = vma_is_anonymous(vma);
if (vm_flags & (VM_IO | VM_PFNMAP))
return -EFAULT;
- if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
+ if ((gup_flags & FOLL_ANON) && !vma_anon)
return -EFAULT;
if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
@@ -978,7 +1017,11 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
return -EFAULT;
if (write) {
- if (!(vm_flags & VM_WRITE)) {
+ if (!vma_anon &&
+ !writable_file_mapping_allowed(vma, gup_flags))
+ return -EFAULT;
+
+ if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) {
if (!(gup_flags & FOLL_FORCE))
return -EFAULT;
/* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */
@@ -2337,6 +2380,82 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
*/
#ifdef CONFIG_HAVE_FAST_GUP
+/*
+ * Used in the GUP-fast path to determine whether a pin is permitted for a
+ * specific folio.
+ *
+ * This call assumes the caller has pinned the folio, that the lowest page table
+ * level still points to this folio, and that interrupts have been disabled.
+ *
+ * Writing to pinned file-backed dirty tracked folios is inherently problematic
+ * (see comment describing the writable_file_mapping_allowed() function). We
+ * therefore try to avoid the most egregious case of a long-term mapping doing
+ * so.
+ *
+ * This function cannot be as thorough as writable_file_mapping_allowed()
+ * because the VMA is not available in the fast path, so instead we whitelist
+ * known-good cases and, if in doubt, fall back to the slow path.
+ */
+static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags)
+{
+ struct address_space *mapping;
+ unsigned long mapping_flags;
+
+ /*
+ * If we aren't pinning then no problematic write can occur. A long term
+ * pin is the most egregious case so this is the one we disallow.
+ */
+ if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) !=
+ (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE))
+ return true;
+
+ /* The folio is pinned, so we can safely access folio fields. */
+
+ if (WARN_ON_ONCE(folio_test_slab(folio)))
+ return false;
+
+ /* hugetlb mappings do not require dirty-tracking. */
+ if (folio_test_hugetlb(folio))
+ return true;
+
+ /*
+ * GUP-fast disables IRQs. When IRQs are disabled, RCU grace periods
+ * cannot proceed, which means no actions performed under RCU can
+ * proceed either.
+ *
+ * Inodes and thus their mappings are freed under RCU, which means the
+ * mapping cannot be freed beneath us and thus we can safely dereference
+ * it.
+ */
+ lockdep_assert_irqs_disabled();
+
+ /*
+ * However, there may be operations which _alter_ the mapping, so ensure
+ * we read it once and only once.
+ */
+ mapping = READ_ONCE(folio->mapping);
+
+ /*
+ * The mapping may have been truncated, in any case we cannot determine
+ * if this mapping is safe - fall back to slow path to determine how to
+ * proceed.
+ */
+ if (!mapping)
+ return false;
+
+ /* Anonymous folios pose no problem. */
+ mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS;
+ if (mapping_flags)
+ return mapping_flags & PAGE_MAPPING_ANON;
+
+ /*
+ * At this point, we know the mapping is non-null and points to an
+ * address_space object. The only remaining whitelisted file system is
+ * shmem.
+ */
+ return shmem_mapping(mapping);
+}
+
static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
unsigned int flags,
struct page **pages)
@@ -2422,6 +2541,11 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
goto pte_unmap;
}
+ if (!folio_fast_pin_allowed(folio, flags)) {
+ gup_put_folio(folio, 1, flags);
+ goto pte_unmap;
+ }
+
if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) {
gup_put_folio(folio, 1, flags);
goto pte_unmap;
@@ -2614,6 +2738,11 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
return 0;
}
+ if (!folio_fast_pin_allowed(folio, flags)) {
+ gup_put_folio(folio, refs, flags);
+ return 0;
+ }
+
if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) {
gup_put_folio(folio, refs, flags);
return 0;
@@ -2680,6 +2809,10 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
return 0;
}
+ if (!folio_fast_pin_allowed(folio, flags)) {
+ gup_put_folio(folio, refs, flags);
+ return 0;
+ }
if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
gup_put_folio(folio, refs, flags);
return 0;
@@ -2720,6 +2853,11 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
return 0;
}
+ if (!folio_fast_pin_allowed(folio, flags)) {
+ gup_put_folio(folio, refs, flags);
+ return 0;
+ }
+
if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
gup_put_folio(folio, refs, flags);
return 0;
@@ -2755,6 +2893,16 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
return 0;
}
+ if (!folio_fast_pin_allowed(folio, flags)) {
+ gup_put_folio(folio, refs, flags);
+ return 0;
+ }
+
+ if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
+ gup_put_folio(folio, refs, flags);
+ return 0;
+ }
+
*nr += refs;
folio_set_referenced(folio);
return 1;
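Between the slow-path check in check_vma_flags() and the fast-path folio_fast_pin_allowed() above, long-term writable pins of dirty-tracked file folios are now refused. A hedged user-space illustration of the newly refused case via liburing's fixed-buffer registration (which takes a FOLL_PIN | FOLL_LONGTERM reference); error handling is elided and the exact errno surfaced to userspace is an assumption:

	#include <liburing.h>
	#include <sys/mman.h>
	#include <fcntl.h>

	int try_register_file_backed(const char *path, size_t len)
	{
		struct io_uring ring;
		struct iovec iov;
		int fd = open(path, O_RDWR);

		iov.iov_base = mmap(NULL, len, PROT_READ | PROT_WRITE,
				    MAP_SHARED, fd, 0);
		iov.iov_len = len;

		io_uring_queue_init(8, &ring, 0);
		/* Long-term pin for write: expected to fail on e.g. ext4;
		 * anonymous memory and shmem remain allowed. */
		return io_uring_register_buffers(&ring, &iov, 1);
	}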
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 624671aaa60d..c3cc20c1b26c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -551,7 +551,7 @@ __setup("transparent_hugepage=", setup_transparent_hugepage);
pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
- pmd = pmd_mkwrite(pmd);
+ pmd = pmd_mkwrite(pmd, vma);
return pmd;
}
@@ -1572,7 +1572,7 @@ out_map:
pmd = pmd_modify(oldpmd, vma->vm_page_prot);
pmd = pmd_mkyoung(pmd);
if (writable)
- pmd = pmd_mkwrite(pmd);
+ pmd = pmd_mkwrite(pmd, vma);
set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
spin_unlock(vmf->ptl);
@@ -1681,6 +1681,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
*/
orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
tlb->fullmm);
+ arch_check_zapped_pmd(vma, orig_pmd);
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
if (vma_is_special_huge(vma)) {
if (arch_needs_pgtable_deposit())
@@ -1924,7 +1925,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
/* See change_pte_range(). */
if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
can_change_pmd_writable(vma, addr, entry))
- entry = pmd_mkwrite(entry);
+ entry = pmd_mkwrite(entry, vma);
ret = HPAGE_PMD_NR;
set_pmd_at(mm, addr, pmd, entry);
@@ -2234,7 +2235,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
} else {
entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
if (write)
- entry = pte_mkwrite(entry);
+ entry = pte_mkwrite(entry, vma);
if (anon_exclusive)
SetPageAnonExclusive(page + i);
if (!young)
@@ -3271,7 +3272,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
if (pmd_swp_soft_dirty(*pvmw->pmd))
pmde = pmd_mksoft_dirty(pmde);
if (is_writable_migration_entry(entry))
- pmde = pmd_mkwrite(pmde);
+ pmde = pmd_mkwrite(pmde, vma);
if (pmd_swp_uffd_wp(*pvmw->pmd))
pmde = pmd_mkuffd_wp(pmde);
if (!is_migration_entry_young(entry))
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f154019e6b84..e4b3e48514d5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4949,7 +4949,7 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
if (writable) {
entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
- vma->vm_page_prot)));
+ vma->vm_page_prot)), vma);
} else {
entry = huge_pte_wrprotect(mk_huge_pte(page,
vma->vm_page_prot));
@@ -4965,7 +4965,7 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
{
pte_t entry;
- entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
+ entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)), vma);
if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
update_mmu_cache(vma, address, ptep);
}
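The pte_mkwrite()/pmd_mkwrite()/huge_pte_mkwrite() call sites above all reflect one interface change: the helpers now take the VMA so an architecture (x86 shadow stack) can pick between an ordinary writable encoding and a shadow-stack one. A hedged sketch of the generic fallback for architectures without shadow stacks, where pte_mkwrite_novma() is assumed to be the companion helper from the same shadow-stack series:

	/* Generic fallback, a sketch: the VMA is simply unused. */
	static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
	{
		return pte_mkwrite_novma(pte);	/* plain RW permission */
	}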
diff --git a/mm/internal.h b/mm/internal.h
index 68410c6d97ac..e28442c0858a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -535,14 +535,14 @@ static inline bool is_exec_mapping(vm_flags_t flags)
}
/*
- * Stack area - automatically grows in one direction
+ * Stack area (including shadow stacks)
*
* VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
* do_mmap() forbids all other combinations.
*/
static inline bool is_stack_mapping(vm_flags_t flags)
{
- return (flags & VM_STACK) == VM_STACK;
+ return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}
/*
@@ -1047,17 +1047,23 @@ static inline void vma_iter_store(struct vma_iterator *vmi,
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
- if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.index > vma->vm_start)) {
- printk("%lu > %lu\n", vmi->mas.index, vma->vm_start);
- printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
- printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last);
- mt_dump(vmi->mas.tree);
+ if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
+ vmi->mas.index > vma->vm_start)) {
+ pr_warn("%lx > %lx\n"
+ "store of vma %lx-%lx\n"
+ "into slot %lx-%lx\n",
+ vmi->mas.index, vma->vm_start,
+ vma->vm_start, vma->vm_end,
+ vmi->mas.index, vmi->mas.last);
}
- if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last < vma->vm_start)) {
- printk("%lu < %lu\n", vmi->mas.last, vma->vm_start);
- printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end);
- printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last);
- mt_dump(vmi->mas.tree);
+ if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
+ vmi->mas.last < vma->vm_start)) {
+ pr_warn("%lx < %lx\n"
+ "store of vma %lx-%lx\n"
+ "into slot %lx-%lx\n",
+ vmi->mas.last, vma->vm_start,
+ vma->vm_start, vma->vm_end,
+ vmi->mas.index, vmi->mas.last);
}
#endif
diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
index 2aafc46a4aaf..392fb273e7bd 100644
--- a/mm/kfence/kfence.h
+++ b/mm/kfence/kfence.h
@@ -29,7 +29,7 @@
* canary of every 8 bytes is the same. 64-bit memory can be filled and checked
* at a time instead of byte by byte to improve performance.
*/
-#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(0x0706050403020100))
+#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(le64_to_cpu(0x0706050403020100)))
/* Maximum stack depth for reports. */
#define KFENCE_STACK_DEPTH 64
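The le64_to_cpu() matters only on big-endian kernels: the 64-bit constant encodes eight consecutive per-byte canaries in ascending address order, so it has to be read as a little-endian word for the word-at-a-time compare to match the byte-wise layout. A hedged sketch of the per-byte rule this corresponds to (the 0xaa XOR low-address-bits form is assumed from KFENCE's byte canary):

	/* Expected canary byte for a given redzone address. */
	static inline u8 kfence_canary_byte(unsigned long addr)
	{
		return 0xaa ^ (addr & 0x7);
	}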
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4b27e245a055..d31fb1e2cb33 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -639,7 +639,7 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
}
}
-static void do_flush_stats(bool atomic)
+static void do_flush_stats(void)
{
/*
* We always flush the entire tree, so concurrent flushers can just
@@ -652,30 +652,16 @@ static void do_flush_stats(bool atomic)
WRITE_ONCE(flush_next_time, jiffies_64 + 2*FLUSH_TIME);
- if (atomic)
- cgroup_rstat_flush_atomic(root_mem_cgroup->css.cgroup);
- else
- cgroup_rstat_flush(root_mem_cgroup->css.cgroup);
+ cgroup_rstat_flush(root_mem_cgroup->css.cgroup);
atomic_set(&stats_flush_threshold, 0);
atomic_set(&stats_flush_ongoing, 0);
}
-static bool should_flush_stats(void)
-{
- return atomic_read(&stats_flush_threshold) > num_online_cpus();
-}
-
void mem_cgroup_flush_stats(void)
{
- if (should_flush_stats())
- do_flush_stats(false);
-}
-
-void mem_cgroup_flush_stats_atomic(void)
-{
- if (should_flush_stats())
- do_flush_stats(true);
+ if (atomic_read(&stats_flush_threshold) > num_online_cpus())
+ do_flush_stats();
}
void mem_cgroup_flush_stats_ratelimited(void)
@@ -690,7 +676,7 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
* Always flush here so that flushing in latency-sensitive paths is
* as cheap as possible.
*/
- do_flush_stats(false);
+ do_flush_stats();
queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
}
@@ -1580,13 +1566,10 @@ static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
}
-static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize)
+static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
- struct seq_buf s;
int i;
- seq_buf_init(&s, buf, bufsize);
-
/*
* Provide statistics on the state of the memory subsystem as
* well as cumulative event counters that show past behavior.
@@ -1603,21 +1586,21 @@ static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize)
u64 size;
size = memcg_page_state_output(memcg, memory_stats[i].idx);
- seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
+ seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
size += memcg_page_state_output(memcg,
NR_SLAB_RECLAIMABLE_B);
- seq_buf_printf(&s, "slab %llu\n", size);
+ seq_buf_printf(s, "slab %llu\n", size);
}
}
/* Accumulated memory events */
- seq_buf_printf(&s, "pgscan %lu\n",
+ seq_buf_printf(s, "pgscan %lu\n",
memcg_events(memcg, PGSCAN_KSWAPD) +
memcg_events(memcg, PGSCAN_DIRECT) +
memcg_events(memcg, PGSCAN_KHUGEPAGED));
- seq_buf_printf(&s, "pgsteal %lu\n",
+ seq_buf_printf(s, "pgsteal %lu\n",
memcg_events(memcg, PGSTEAL_KSWAPD) +
memcg_events(memcg, PGSTEAL_DIRECT) +
memcg_events(memcg, PGSTEAL_KHUGEPAGED));
@@ -1627,13 +1610,24 @@ static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize)
memcg_vm_event_stat[i] == PGPGOUT)
continue;
- seq_buf_printf(&s, "%s %lu\n",
+ seq_buf_printf(s, "%s %lu\n",
vm_event_name(memcg_vm_event_stat[i]),
memcg_events(memcg, memcg_vm_event_stat[i]));
}
/* The above should easily fit into one page */
- WARN_ON_ONCE(seq_buf_has_overflowed(&s));
+ WARN_ON_ONCE(seq_buf_has_overflowed(s));
+}
+
+static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
+
+static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
+{
+ if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ memcg_stat_format(memcg, s);
+ else
+ memcg1_stat_format(memcg, s);
+ WARN_ON_ONCE(seq_buf_has_overflowed(s));
}
#define K(x) ((x) << (PAGE_SHIFT-10))
@@ -1671,6 +1665,7 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
/* Use static buffer, for the caller is holding oom_lock. */
static char buf[PAGE_SIZE];
+ struct seq_buf s;
lockdep_assert_held(&oom_lock);
@@ -1693,8 +1688,9 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
pr_info("Memory cgroup stats for ");
pr_cont_cgroup_path(memcg->css.cgroup);
pr_cont(":");
- memory_stat_format(memcg, buf, sizeof(buf));
- pr_info("%s", buf);
+ seq_buf_init(&s, buf, sizeof(buf));
+ memory_stat_format(memcg, &s);
+ seq_buf_do_printk(&s, KERN_INFO);
}
/*
@@ -2028,26 +2024,12 @@ bool mem_cgroup_oom_synchronize(bool handle)
if (locked)
mem_cgroup_oom_notify(memcg);
- if (locked && !READ_ONCE(memcg->oom_kill_disable)) {
- mem_cgroup_unmark_under_oom(memcg);
- finish_wait(&memcg_oom_waitq, &owait.wait);
- mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
- current->memcg_oom_order);
- } else {
- schedule();
- mem_cgroup_unmark_under_oom(memcg);
- finish_wait(&memcg_oom_waitq, &owait.wait);
- }
+ schedule();
+ mem_cgroup_unmark_under_oom(memcg);
+ finish_wait(&memcg_oom_waitq, &owait.wait);
- if (locked) {
+ if (locked)
mem_cgroup_oom_unlock(memcg);
- /*
- * There is no guarantee that an OOM-lock contender
- * sees the wakeups triggered by the OOM kill
- * uncharges. Wake any sleepers explicitly.
- */
- memcg_oom_recover(memcg);
- }
cleanup:
current->memcg_in_oom = NULL;
css_put(&memcg->css);
@@ -2275,7 +2257,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
local_lock_irqsave(&memcg_stock.stock_lock, flags);
stock = this_cpu_ptr(&memcg_stock);
- if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
+ if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
stock->nr_pages -= nr_pages;
ret = true;
}
@@ -2290,7 +2272,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
*/
static void drain_stock(struct memcg_stock_pcp *stock)
{
- struct mem_cgroup *old = stock->cached;
+ struct mem_cgroup *old = READ_ONCE(stock->cached);
if (!old)
return;
@@ -2303,7 +2285,7 @@ static void drain_stock(struct memcg_stock_pcp *stock)
}
css_put(&old->css);
- stock->cached = NULL;
+ WRITE_ONCE(stock->cached, NULL);
}
static void drain_local_stock(struct work_struct *dummy)
@@ -2338,10 +2320,10 @@ static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
struct memcg_stock_pcp *stock;
stock = this_cpu_ptr(&memcg_stock);
- if (stock->cached != memcg) { /* reset if necessary */
+ if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
drain_stock(stock);
css_get(&memcg->css);
- stock->cached = memcg;
+ WRITE_ONCE(stock->cached, memcg);
}
stock->nr_pages += nr_pages;
@@ -2383,7 +2365,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
bool flush = false;
rcu_read_lock();
- memcg = stock->cached;
+ memcg = READ_ONCE(stock->cached);
if (memcg && stock->nr_pages &&
mem_cgroup_is_descendant(memcg, root_memcg))
flush = true;
@@ -3208,12 +3190,12 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
* accumulating over a page of vmstat data or when pgdat or idx
* changes.
*/
- if (stock->cached_objcg != objcg) {
+ if (READ_ONCE(stock->cached_objcg) != objcg) {
old = drain_obj_stock(stock);
obj_cgroup_get(objcg);
stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
- stock->cached_objcg = objcg;
+ WRITE_ONCE(stock->cached_objcg, objcg);
stock->cached_pgdat = pgdat;
} else if (stock->cached_pgdat != pgdat) {
/* Flush the existing cached vmstat data */
@@ -3267,7 +3249,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
local_lock_irqsave(&memcg_stock.stock_lock, flags);
stock = this_cpu_ptr(&memcg_stock);
- if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
+ if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
stock->nr_bytes -= nr_bytes;
ret = true;
}
@@ -3279,7 +3261,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
{
- struct obj_cgroup *old = stock->cached_objcg;
+ struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
if (!old)
return NULL;
@@ -3332,7 +3314,7 @@ static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
stock->cached_pgdat = NULL;
}
- stock->cached_objcg = NULL;
+ WRITE_ONCE(stock->cached_objcg, NULL);
/*
* The `old' objects needs to be released by the caller via
* obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
@@ -3343,10 +3325,11 @@ static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
struct mem_cgroup *root_memcg)
{
+ struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
struct mem_cgroup *memcg;
- if (stock->cached_objcg) {
- memcg = obj_cgroup_memcg(stock->cached_objcg);
+ if (objcg) {
+ memcg = obj_cgroup_memcg(objcg);
if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
return true;
}
@@ -3365,10 +3348,10 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
local_lock_irqsave(&memcg_stock.stock_lock, flags);
stock = this_cpu_ptr(&memcg_stock);
- if (stock->cached_objcg != objcg) { /* reset if necessary */
+ if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
old = drain_obj_stock(stock);
obj_cgroup_get(objcg);
- stock->cached_objcg = objcg;
+ WRITE_ONCE(stock->cached_objcg, objcg);
stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
allow_uncharge = true; /* Allow uncharge when objcg changes */
@@ -3699,27 +3682,13 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
if (mem_cgroup_is_root(memcg)) {
/*
- * We can reach here from irq context through:
- * uncharge_batch()
- * |--memcg_check_events()
- * |--mem_cgroup_threshold()
- * |--__mem_cgroup_threshold()
- * |--mem_cgroup_usage
- *
- * rstat flushing is an expensive operation that should not be
- * done from irq context; use stale stats in this case.
- * Arguably, usage threshold events are not reliable on the root
- * memcg anyway since its usage is ill-defined.
- *
- * Additionally, other call paths through memcg_check_events()
- * disable irqs, so make sure we are flushing stats atomically.
+ * Approximate root's usage from global state. This isn't
+ * perfect, but the root usage was always an approximation.
*/
- if (in_task())
- mem_cgroup_flush_stats_atomic();
- val = memcg_page_state(memcg, NR_FILE_PAGES) +
- memcg_page_state(memcg, NR_ANON_MAPPED);
+ val = global_node_page_state(NR_FILE_PAGES) +
+ global_node_page_state(NR_ANON_MAPPED);
if (swap)
- val += memcg_page_state(memcg, MEMCG_SWAP);
+ val += total_swap_pages - get_nr_swap_pages();
} else {
if (!swap)
val = page_counter_read(&memcg->memory);
@@ -4135,9 +4104,8 @@ static const unsigned int memcg1_events[] = {
PGMAJFAULT,
};
-static int memcg_stat_show(struct seq_file *m, void *v)
+static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
{
- struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
unsigned long memory, memsw;
struct mem_cgroup *mi;
unsigned int i;
@@ -4152,18 +4120,18 @@ static int memcg_stat_show(struct seq_file *m, void *v)
if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
continue;
nr = memcg_page_state_local(memcg, memcg1_stats[i]);
- seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
+ seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i],
nr * memcg_page_state_unit(memcg1_stats[i]));
}
for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
- seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
- memcg_events_local(memcg, memcg1_events[i]));
+ seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]),
+ memcg_events_local(memcg, memcg1_events[i]));
for (i = 0; i < NR_LRU_LISTS; i++)
- seq_printf(m, "%s %lu\n", lru_list_name(i),
- memcg_page_state_local(memcg, NR_LRU_BASE + i) *
- PAGE_SIZE);
+ seq_buf_printf(s, "%s %lu\n", lru_list_name(i),
+ memcg_page_state_local(memcg, NR_LRU_BASE + i) *
+ PAGE_SIZE);
/* Hierarchical information */
memory = memsw = PAGE_COUNTER_MAX;
@@ -4171,11 +4139,11 @@ static int memcg_stat_show(struct seq_file *m, void *v)
memory = min(memory, READ_ONCE(mi->memory.max));
memsw = min(memsw, READ_ONCE(mi->memsw.max));
}
- seq_printf(m, "hierarchical_memory_limit %llu\n",
- (u64)memory * PAGE_SIZE);
+ seq_buf_printf(s, "hierarchical_memory_limit %llu\n",
+ (u64)memory * PAGE_SIZE);
if (do_memsw_account())
- seq_printf(m, "hierarchical_memsw_limit %llu\n",
- (u64)memsw * PAGE_SIZE);
+ seq_buf_printf(s, "hierarchical_memsw_limit %llu\n",
+ (u64)memsw * PAGE_SIZE);
for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
unsigned long nr;
@@ -4183,19 +4151,19 @@ static int memcg_stat_show(struct seq_file *m, void *v)
if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
continue;
nr = memcg_page_state(memcg, memcg1_stats[i]);
- seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
+ seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i],
(u64)nr * memcg_page_state_unit(memcg1_stats[i]));
}
for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
- seq_printf(m, "total_%s %llu\n",
- vm_event_name(memcg1_events[i]),
- (u64)memcg_events(memcg, memcg1_events[i]));
+ seq_buf_printf(s, "total_%s %llu\n",
+ vm_event_name(memcg1_events[i]),
+ (u64)memcg_events(memcg, memcg1_events[i]));
for (i = 0; i < NR_LRU_LISTS; i++)
- seq_printf(m, "total_%s %llu\n", lru_list_name(i),
- (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
- PAGE_SIZE);
+ seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i),
+ (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
+ PAGE_SIZE);
#ifdef CONFIG_DEBUG_VM
{
@@ -4210,12 +4178,10 @@ static int memcg_stat_show(struct seq_file *m, void *v)
anon_cost += mz->lruvec.anon_cost;
file_cost += mz->lruvec.file_cost;
}
- seq_printf(m, "anon_cost %lu\n", anon_cost);
- seq_printf(m, "file_cost %lu\n", file_cost);
+ seq_buf_printf(s, "anon_cost %lu\n", anon_cost);
+ seq_buf_printf(s, "file_cost %lu\n", file_cost);
}
#endif
-
- return 0;
}
static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
@@ -4648,11 +4614,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
struct mem_cgroup *parent;
- /*
- * wb_writeback() takes a spinlock and calls
- * wb_over_bg_thresh()->mem_cgroup_wb_stats(). Do not sleep.
- */
- mem_cgroup_flush_stats_atomic();
+ mem_cgroup_flush_stats();
*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
@@ -5059,6 +5021,8 @@ static int mem_cgroup_slab_show(struct seq_file *m, void *p)
}
#endif
+static int memory_stat_show(struct seq_file *m, void *v);
+
static struct cftype mem_cgroup_legacy_files[] = {
{
.name = "usage_in_bytes",
@@ -5091,7 +5055,7 @@ static struct cftype mem_cgroup_legacy_files[] = {
},
{
.name = "stat",
- .seq_show = memcg_stat_show,
+ .seq_show = memory_stat_show,
},
{
.name = "force_empty",
@@ -6634,10 +6598,12 @@ static int memory_stat_show(struct seq_file *m, void *v)
{
struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ struct seq_buf s;
if (!buf)
return -ENOMEM;
- memory_stat_format(memcg, buf, PAGE_SIZE);
+ seq_buf_init(&s, buf, PAGE_SIZE);
+ memory_stat_format(memcg, &s);
seq_puts(m, buf);
kfree(buf);
return 0;
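
Note on the memcontrol.c hunks above: the READ_ONCE()/WRITE_ONCE() conversions
follow from drain_all_stock() inspecting every CPU's stock->cached without
taking memcg_stock_pcp::stock_lock — the pointer is written under the local
lock but peeked at locklessly from remote CPUs, so both sides must be marked
to rule out load/store tearing. A minimal sketch of the pattern, with
illustrative names rather than the real memcg structures:

	/* Sketch only: a per-CPU cache whose owner field is peeked at
	 * locklessly by remote CPUs. Writes happen under a local lock;
	 * annotating both sides gives the compiler (and KCSAN) a single,
	 * untorn load/store. */
	struct pcp_cache {
		struct mem_cgroup *cached;	/* annotated accesses only */
		unsigned int nr_pages;		/* local-only, plain access */
	};

	static void pcp_cache_set(struct pcp_cache *c, struct mem_cgroup *memcg)
	{
		WRITE_ONCE(c->cached, memcg);	/* pairs with remote READ_ONCE() */
	}

	static bool pcp_cache_is(struct pcp_cache *c, struct mem_cgroup *memcg)
	{
		return READ_ONCE(c->cached) == memcg;
	}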
diff --git a/mm/memory.c b/mm/memory.c
index f69fbc251198..5e2c6b1fc00e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1412,6 +1412,7 @@ again:
continue;
ptent = ptep_get_and_clear_full(mm, addr, pte,
tlb->fullmm);
+ arch_check_zapped_pte(vma, ptent);
tlb_remove_tlb_entry(tlb, pte, addr);
zap_install_uffd_wp_if_needed(vma, addr, pte, details,
ptent);
@@ -2016,6 +2017,9 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
BUG_ON(vma->vm_flags & VM_PFNMAP);
vm_flags_set(vma, VM_MIXEDMAP);
}
+ if (track_pfn_remap(vma, &vma->vm_page_prot,
+ page_to_pfn(page), addr, PAGE_SIZE))
+ return -EINVAL;
return insert_page(vma, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);
@@ -4100,7 +4104,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
entry = mk_pte(&folio->page, vma->vm_page_prot);
entry = pte_sw_mkyoung(entry);
if (vma->vm_flags & VM_WRITE)
- entry = pte_mkwrite(pte_mkdirty(entry));
+ entry = pte_mkwrite(pte_mkdirty(entry), vma);
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
&vmf->ptl);
@@ -4796,7 +4800,7 @@ out_map:
pte = pte_modify(old_pte, vma->vm_page_prot);
pte = pte_mkyoung(pte);
if (writable)
- pte = pte_mkwrite(pte);
+ pte = pte_mkwrite(pte, vma);
ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
update_mmu_cache(vma, vmf->address, vmf->pte);
pte_unmap_unlock(vmf->pte, vmf->ptl);
diff --git a/mm/migrate.c b/mm/migrate.c
index 01cac26a3127..f770f9f5ac2d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -219,7 +219,7 @@ static bool remove_migration_pte(struct folio *folio,
if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
pte = pte_mkdirty(pte);
if (is_writable_migration_entry(entry))
- pte = pte_mkwrite(pte);
+ pte = pte_mkwrite(pte, vma);
else if (pte_swp_uffd_wp(*pvmw.pte))
pte = pte_mkuffd_wp(pte);
@@ -692,37 +692,32 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
enum migrate_mode mode)
{
struct buffer_head *bh = head;
+ struct buffer_head *failed_bh;
- /* Simple case, sync compaction */
- if (mode != MIGRATE_ASYNC) {
- do {
- lock_buffer(bh);
- bh = bh->b_this_page;
-
- } while (bh != head);
-
- return true;
- }
-
- /* async case, we cannot block on lock_buffer so use trylock_buffer */
do {
if (!trylock_buffer(bh)) {
- /*
- * We failed to lock the buffer and cannot stall in
- * async migration. Release the taken locks
- */
- struct buffer_head *failed_bh = bh;
- bh = head;
- while (bh != failed_bh) {
- unlock_buffer(bh);
- bh = bh->b_this_page;
- }
- return false;
+ if (mode == MIGRATE_ASYNC)
+ goto unlock;
+ if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
+ goto unlock;
+ lock_buffer(bh);
}
bh = bh->b_this_page;
} while (bh != head);
+
return true;
+
+unlock:
+ /* We failed to lock the buffer and cannot stall. */
+ failed_bh = bh;
+ bh = head;
+ while (bh != failed_bh) {
+ unlock_buffer(bh);
+ bh = bh->b_this_page;
+ }
+
+ return false;
}
static int __buffer_migrate_folio(struct address_space *mapping,
@@ -1156,6 +1151,14 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page
if (current->flags & PF_MEMALLOC)
goto out;
+ /*
+ * In "light" mode, we can wait for transient locks (eg
+ * inserting a page into the page table), but it's not
+ * worth waiting for I/O.
+ */
+ if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
+ goto out;
+
folio_lock(src);
}
locked = true;
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index d30c9de60b0d..df3f5e9d5f76 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -646,7 +646,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
}
entry = mk_pte(page, vma->vm_page_prot);
if (vma->vm_flags & VM_WRITE)
- entry = pte_mkwrite(pte_mkdirty(entry));
+ entry = pte_mkwrite(pte_mkdirty(entry), vma);
}
ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
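
The pte_mkwrite() call sites changed above and below (do_anonymous_page(),
NUMA fault recovery, migration, device migration, mprotect, userfaultfd) all
gain a VMA argument so the architecture can choose which kind of writable PTE
to create. A simplified, hedged sketch of what an architecture override can
look like, loosely modeled on the x86 shadow stack work elsewhere in this
series (helper names from that series; treat as illustrative):

	/* Sketch: shadow stack memory is encoded as Write=0,Dirty=1 on
	 * x86, so "make writable" depends on what the VMA is. */
	pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
	{
		if (vma->vm_flags & VM_SHADOW_STACK)
			return pte_mkwrite_shstk(pte);

		return pte_mkwrite_novma(pte);	/* the old one-argument behaviour */
	}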
diff --git a/mm/mmap.c b/mm/mmap.c
index 13678edaa22c..50daaa40242b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -299,62 +299,44 @@ out:
return origbrk;
}
-#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
-extern void mt_validate(struct maple_tree *mt);
-extern void mt_dump(const struct maple_tree *mt);
-
-/* Validate the maple tree */
-static void validate_mm_mt(struct mm_struct *mm)
-{
- struct maple_tree *mt = &mm->mm_mt;
- struct vm_area_struct *vma_mt;
-
- MA_STATE(mas, mt, 0, 0);
-
- mt_validate(&mm->mm_mt);
- mas_for_each(&mas, vma_mt, ULONG_MAX) {
- if ((vma_mt->vm_start != mas.index) ||
- (vma_mt->vm_end - 1 != mas.last)) {
- pr_emerg("issue in %s\n", current->comm);
- dump_stack();
- dump_vma(vma_mt);
- pr_emerg("mt piv: %p %lu - %lu\n", vma_mt,
- mas.index, mas.last);
- pr_emerg("mt vma: %p %lu - %lu\n", vma_mt,
- vma_mt->vm_start, vma_mt->vm_end);
-
- mt_dump(mas.tree);
- if (vma_mt->vm_end != mas.last + 1) {
- pr_err("vma: %p vma_mt %lu-%lu\tmt %lu-%lu\n",
- mm, vma_mt->vm_start, vma_mt->vm_end,
- mas.index, mas.last);
- mt_dump(mas.tree);
- }
- VM_BUG_ON_MM(vma_mt->vm_end != mas.last + 1, mm);
- if (vma_mt->vm_start != mas.index) {
- pr_err("vma: %p vma_mt %p %lu - %lu doesn't match\n",
- mm, vma_mt, vma_mt->vm_start, vma_mt->vm_end);
- mt_dump(mas.tree);
- }
- VM_BUG_ON_MM(vma_mt->vm_start != mas.index, mm);
- }
- }
-}
-
+#if defined(CONFIG_DEBUG_VM)
static void validate_mm(struct mm_struct *mm)
{
int bug = 0;
int i = 0;
struct vm_area_struct *vma;
- MA_STATE(mas, &mm->mm_mt, 0, 0);
+ VMA_ITERATOR(vmi, mm, 0);
- validate_mm_mt(mm);
+#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
+ mt_validate(&mm->mm_mt);
+#endif
- mas_for_each(&mas, vma, ULONG_MAX) {
+ for_each_vma(vmi, vma) {
#ifdef CONFIG_DEBUG_VM_RB
struct anon_vma *anon_vma = vma->anon_vma;
struct anon_vma_chain *avc;
+#endif
+ unsigned long vmi_start, vmi_end;
+ bool warn = 0;
+
+ vmi_start = vma_iter_addr(&vmi);
+ vmi_end = vma_iter_end(&vmi);
+ if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
+ warn = 1;
+
+ if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
+ warn = 1;
+
+ if (warn) {
+ pr_emerg("issue in %s\n", current->comm);
+ dump_stack();
+ dump_vma(vma);
+ pr_emerg("tree range: %px start %lx end %lx\n", vma,
+ vmi_start, vmi_end - 1);
+ vma_iter_dump_tree(&vmi);
+ }
+#ifdef CONFIG_DEBUG_VM_RB
if (anon_vma) {
anon_vma_lock_read(anon_vma);
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
@@ -365,16 +347,15 @@ static void validate_mm(struct mm_struct *mm)
i++;
}
if (i != mm->map_count) {
- pr_emerg("map_count %d mas_for_each %d\n", mm->map_count, i);
+ pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
bug = 1;
}
VM_BUG_ON_MM(bug, mm);
}
-#else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
-#define validate_mm_mt(root) do { } while (0)
+#else /* !CONFIG_DEBUG_VM */
#define validate_mm(mm) do { } while (0)
-#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
+#endif /* CONFIG_DEBUG_VM */
/*
* vma has some anon_vma assigned, and is already inserted on that
@@ -1221,11 +1202,11 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode,
*/
unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
- unsigned long flags, unsigned long pgoff,
- unsigned long *populate, struct list_head *uf)
+ unsigned long flags, vm_flags_t vm_flags,
+ unsigned long pgoff, unsigned long *populate,
+ struct list_head *uf)
{
struct mm_struct *mm = current->mm;
- vm_flags_t vm_flags;
int pkey = 0;
validate_mm(mm);
@@ -1286,7 +1267,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
* to. we assume access permissions have been handled by the open
* of the memory object, so we don't do any here.
*/
- vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
+ vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
if (flags & MAP_LOCKED)
@@ -1475,6 +1456,48 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */
+static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
+{
+ return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
+}
+
+static bool vma_is_shared_writable(struct vm_area_struct *vma)
+{
+ return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
+ (VM_WRITE | VM_SHARED);
+}
+
+static bool vma_fs_can_writeback(struct vm_area_struct *vma)
+{
+ /* No managed pages to writeback. */
+ if (vma->vm_flags & VM_PFNMAP)
+ return false;
+
+ return vma->vm_file && vma->vm_file->f_mapping &&
+ mapping_can_writeback(vma->vm_file->f_mapping);
+}
+
+/*
+ * Does this VMA require the underlying folios to have their dirty state
+ * tracked?
+ */
+bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
+{
+ /* Only shared, writable VMAs require dirty tracking. */
+ if (!vma_is_shared_writable(vma))
+ return false;
+
+ /* Does the filesystem need to be notified? */
+ if (vm_ops_needs_writenotify(vma->vm_ops))
+ return true;
+
+ /*
+ * Even if the filesystem doesn't indicate a need for writenotify, if it
+ * can writeback, dirty tracking is still required.
+ */
+ return vma_fs_can_writeback(vma);
+}
+
/*
* Some shared mappings will want the pages marked read-only
* to track write events. If so, we'll downgrade vm_page_prot
@@ -1483,21 +1506,18 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
*/
int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
{
- vm_flags_t vm_flags = vma->vm_flags;
- const struct vm_operations_struct *vm_ops = vma->vm_ops;
-
/* If it was private or non-writable, the write bit is already clear */
- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
+ if (!vma_is_shared_writable(vma))
return 0;
/* The backer wishes to know when pages are first written to? */
- if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite))
+ if (vm_ops_needs_writenotify(vma->vm_ops))
return 1;
/* The open routine did something to the protections that pgprot_modify
* won't preserve? */
if (pgprot_val(vm_page_prot) !=
- pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
+ pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
return 0;
/*
@@ -1511,13 +1531,8 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
if (userfaultfd_wp(vma))
return 1;
- /* Specialty mapping? */
- if (vm_flags & VM_PFNMAP)
- return 0;
-
/* Can the mapping track the dirty pages? */
- return vma->vm_file && vma->vm_file->f_mapping &&
- mapping_can_writeback(vma->vm_file->f_mapping);
+ return vma_fs_can_writeback(vma);
}
/*
@@ -2234,7 +2249,7 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct vm_area_struct *new;
int err;
- validate_mm_mt(vma->vm_mm);
+ validate_mm(vma->vm_mm);
WARN_ON(vma->vm_start >= addr);
WARN_ON(vma->vm_end <= addr);
@@ -2292,7 +2307,7 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
/* Success. */
if (new_below)
vma_next(vmi);
- validate_mm_mt(vma->vm_mm);
+ validate_mm(vma->vm_mm);
return 0;
out_free_mpol:
@@ -2301,7 +2316,7 @@ out_free_vmi:
vma_iter_free(vmi);
out_free_vma:
vm_area_free(new);
- validate_mm_mt(vma->vm_mm);
+ validate_mm(vma->vm_mm);
return err;
}
@@ -2410,7 +2425,12 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
#endif
}
- next = vma_next(vmi);
+ if (vma_iter_end(vmi) > end)
+ next = vma_iter_load(vmi);
+
+ if (!next)
+ next = vma_next(vmi);
+
if (unlikely(uf)) {
/*
* If userfaultfd_unmap_prep returns an error the vmas
@@ -2623,6 +2643,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
}
cannot_expand:
+ if (prev)
+ vma_iter_next_range(&vmi);
+
/*
* Determine the object being mapped and call the appropriate
* specific mapper. the address has already been validated, but
@@ -2903,7 +2926,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
file = get_file(vma->vm_file);
ret = do_mmap(vma->vm_file, start, size,
- prot, flags, pgoff, &populate, NULL);
+ prot, flags, 0, pgoff, &populate, NULL);
fput(file);
out:
mmap_write_unlock(mm);
@@ -2936,7 +2959,7 @@ int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
arch_unmap(mm, start, end);
ret = do_vmi_align_munmap(vmi, vma, mm, start, end, uf, downgrade);
- validate_mm_mt(mm);
+ validate_mm(mm);
return ret;
}
@@ -2958,7 +2981,7 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct mm_struct *mm = current->mm;
struct vma_prepare vp;
- validate_mm_mt(mm);
+ validate_mm(mm);
/*
* Check against address space limits by the changed size
* Note: This happens *after* clearing old mappings in some code paths.
@@ -3199,7 +3222,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
bool faulted_in_anon_vma = true;
VMA_ITERATOR(vmi, mm, addr);
- validate_mm_mt(mm);
+ validate_mm(mm);
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
@@ -3258,7 +3281,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
goto out_vma_link;
*need_rmap_locks = false;
}
- validate_mm_mt(mm);
+ validate_mm(mm);
return new_vma;
out_vma_link:
@@ -3274,7 +3297,7 @@ out_free_mempol:
out_free_vma:
vm_area_free(new_vma);
out:
- validate_mm_mt(mm);
+ validate_mm(mm);
return NULL;
}
@@ -3411,7 +3434,7 @@ static struct vm_area_struct *__install_special_mapping(
int ret;
struct vm_area_struct *vma;
- validate_mm_mt(mm);
+ validate_mm(mm);
vma = vm_area_alloc(mm);
if (unlikely(vma == NULL))
return ERR_PTR(-ENOMEM);
@@ -3434,12 +3457,12 @@ static struct vm_area_struct *__install_special_mapping(
perf_event_mmap(vma);
- validate_mm_mt(mm);
+ validate_mm(mm);
return vma;
out:
vm_area_free(vma);
- validate_mm_mt(mm);
+ validate_mm(mm);
return ERR_PTR(ret);
}
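
With the signature change above, do_mmap() now ORs a caller-supplied
vm_flags_t into the flags it computes itself; existing callers such as
remap_file_pages() and vm_mmap_pgoff() simply pass 0. The motivating user
appears to be the shadow stack work in this series, which needs a mapping
type that plain PROT_*/MAP_* bits cannot express. A hedged usage sketch
(the flag value is only an example, not the series' actual consumer):

	/* Sketch: request extra VMA flags at mmap time instead of
	 * patching the VMA up afterwards. */
	addr = do_mmap(file, addr, len, prot, flags,
		       VM_NORESERVE,	/* example extra vm_flags */
		       pgoff, &populate, &uf);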
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 92d3d3ca390a..afdb6723782e 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -198,7 +198,7 @@ static long change_pte_range(struct mmu_gather *tlb,
if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
!pte_write(ptent) &&
can_change_pte_writable(vma, addr, ptent))
- ptent = pte_mkwrite(ptent);
+ ptent = pte_mkwrite(ptent, vma);
ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
if (pte_needs_flush(oldpte, ptent))
diff --git a/mm/nommu.c b/mm/nommu.c
index f670d9979a26..138826c4a872 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1002,6 +1002,7 @@ unsigned long do_mmap(struct file *file,
unsigned long len,
unsigned long prot,
unsigned long flags,
+ vm_flags_t vm_flags,
unsigned long pgoff,
unsigned long *populate,
struct list_head *uf)
@@ -1009,7 +1010,6 @@ unsigned long do_mmap(struct file *file,
struct vm_area_struct *vma;
struct vm_region *region;
struct rb_node *rb;
- vm_flags_t vm_flags;
unsigned long capabilities, result;
int ret;
VMA_ITERATOR(vmi, current->mm, 0);
@@ -1029,7 +1029,7 @@ unsigned long do_mmap(struct file *file,
/* we've determined that we can make the mapping, now translate what we
* now know into VMA flags */
- vm_flags = determine_vm_flags(file, prot, flags, capabilities);
+ vm_flags |= determine_vm_flags(file, prot, flags, capabilities);
/* we're going to need to record the mapping */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 47421bedc12b..af9c995d3c1e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1521,7 +1521,7 @@ struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
/* end_pfn is one past the range we are checking */
end_pfn--;
- if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
+ if (!pfn_valid(end_pfn))
return NULL;
start_page = pfn_to_online_page(start_pfn);
diff --git a/mm/shmem.c b/mm/shmem.c
index e40a08c5c6d7..e517ab50afd9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1030,7 +1030,22 @@ whole_folios:
}
VM_BUG_ON_FOLIO(folio_test_writeback(folio),
folio);
- truncate_inode_folio(mapping, folio);
+
+ if (!folio_test_large(folio)) {
+ truncate_inode_folio(mapping, folio);
+ } else if (truncate_inode_partial_folio(folio, lstart, lend)) {
+ /*
+ * If we split a page, reset the loop so that we
+ * pick up the new sub pages. Otherwise the THP
+ * was entirely dropped or the target range was
+ * zeroed, so just continue the loop as is.
+ */
+ if (!folio_test_large(folio)) {
+ folio_unlock(folio);
+ index = start;
+ break;
+ }
+ }
}
folio_unlock(folio);
}
diff --git a/mm/shrinker_debug.c b/mm/shrinker_debug.c
index 3f83b10c5031..fe10436d9911 100644
--- a/mm/shrinker_debug.c
+++ b/mm/shrinker_debug.c
@@ -237,7 +237,8 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
}
EXPORT_SYMBOL(shrinker_debugfs_rename);
-struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
+struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
+ int *debugfs_id)
{
struct dentry *entry = shrinker->debugfs_entry;
@@ -246,14 +247,18 @@ struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
kfree_const(shrinker->name);
shrinker->name = NULL;
- if (entry) {
- ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
- shrinker->debugfs_entry = NULL;
- }
+ *debugfs_id = entry ? shrinker->debugfs_id : -1;
+ shrinker->debugfs_entry = NULL;
return entry;
}
+void shrinker_debugfs_remove(struct dentry *debugfs_entry, int debugfs_id)
+{
+ debugfs_remove_recursive(debugfs_entry);
+ ida_free(&shrinker_debugfs_ida, debugfs_id);
+}
+
static int __init shrinker_debugfs_init(void)
{
struct shrinker *shrinker;
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index e97a0b4889fc..6dea7f57026e 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -72,7 +72,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
if (page_in_cache && !vm_shared)
writable = false;
if (writable)
- _dst_pte = pte_mkwrite(_dst_pte);
+ _dst_pte = pte_mkwrite(_dst_pte, dst_vma);
if (flags & MFILL_ATOMIC_WP)
_dst_pte = pte_mkuffd_wp(_dst_pte);
diff --git a/mm/util.c b/mm/util.c
index dd12b9531ac4..8e7fc6cacab4 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -540,7 +540,7 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
if (!ret) {
if (mmap_write_lock_killable(mm))
return -EINTR;
- ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
+ ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
&uf);
mmap_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d257916f39e5..6d0cd2840cf0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -805,6 +805,7 @@ EXPORT_SYMBOL(register_shrinker);
void unregister_shrinker(struct shrinker *shrinker)
{
struct dentry *debugfs_entry;
+ int debugfs_id;
if (!(shrinker->flags & SHRINKER_REGISTERED))
return;
@@ -814,13 +815,13 @@ void unregister_shrinker(struct shrinker *shrinker)
shrinker->flags &= ~SHRINKER_REGISTERED;
if (shrinker->flags & SHRINKER_MEMCG_AWARE)
unregister_memcg_shrinker(shrinker);
- debugfs_entry = shrinker_debugfs_remove(shrinker);
+ debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id);
mutex_unlock(&shrinker_mutex);
atomic_inc(&shrinker_srcu_generation);
synchronize_srcu(&shrinker_srcu);
- debugfs_remove_recursive(debugfs_entry);
+ shrinker_debugfs_remove(debugfs_entry, debugfs_id);
kfree(shrinker->nr_deferred);
shrinker->nr_deferred = NULL;
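
The split above separates shrinker debugfs teardown into a non-blocking
detach and a deferred removal: shrinker_debugfs_detach() hands back both the
dentry and the ida id while shrinker_mutex is held, and only after the SRCU
grace period does shrinker_debugfs_remove() release them together. Holding
the id until the dentry is actually gone matters: if the id returned to the
ida while the old directory still existed, a newly registered shrinker could
draw the same id and collide with the stale, identically named entry. The
resulting call order in unregister_shrinker():

	debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id);
	mutex_unlock(&shrinker_mutex);

	synchronize_srcu(&shrinker_srcu);

	shrinker_debugfs_remove(debugfs_entry, debugfs_id);	/* may block */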
diff --git a/mm/workingset.c b/mm/workingset.c
index 817758951886..d81f9dafc9f1 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -255,6 +255,29 @@ static void *lru_gen_eviction(struct folio *folio)
return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs);
}
+/*
+ * Tests if the shadow entry is for a folio that was recently evicted.
+ * Fills in @memcgid, @pgdat, @token, @workingset with the values
+ * unpacked from shadow.
+ */
+static bool lru_gen_test_recent(void *shadow, bool file, int *memcgid,
+ struct pglist_data **pgdat, unsigned long *token, bool *workingset)
+{
+ struct mem_cgroup *eviction_memcg;
+ struct lruvec *lruvec;
+ struct lru_gen_folio *lrugen;
+ unsigned long min_seq;
+
+ unpack_shadow(shadow, memcgid, pgdat, token, workingset);
+ eviction_memcg = mem_cgroup_from_id(*memcgid);
+
+ lruvec = mem_cgroup_lruvec(eviction_memcg, *pgdat);
+ lrugen = &lruvec->lrugen;
+
+ min_seq = READ_ONCE(lrugen->min_seq[file]);
+ return (*token >> LRU_REFS_WIDTH) == (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH));
+}
+
static void lru_gen_refault(struct folio *folio, void *shadow)
{
int hist, tier, refs;
@@ -269,23 +292,22 @@ static void lru_gen_refault(struct folio *folio, void *shadow)
int type = folio_is_file_lru(folio);
int delta = folio_nr_pages(folio);
- unpack_shadow(shadow, &memcg_id, &pgdat, &token, &workingset);
-
- if (pgdat != folio_pgdat(folio))
- return;
-
rcu_read_lock();
+ if (!lru_gen_test_recent(shadow, type, &memcg_id, &pgdat, &token,
+ &workingset))
+ goto unlock;
+
memcg = folio_memcg_rcu(folio);
if (memcg_id != mem_cgroup_id(memcg))
goto unlock;
+ if (pgdat != folio_pgdat(folio))
+ goto unlock;
+
lruvec = mem_cgroup_lruvec(memcg, pgdat);
lrugen = &lruvec->lrugen;
-
min_seq = READ_ONCE(lrugen->min_seq[type]);
- if ((token >> LRU_REFS_WIDTH) != (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH)))
- goto unlock;
hist = lru_hist_from_seq(min_seq);
/* see the comment in folio_lru_refs() */
@@ -317,6 +339,12 @@ static void *lru_gen_eviction(struct folio *folio)
return NULL;
}
+static bool lru_gen_test_recent(void *shadow, bool file, int *memcgid,
+ struct pglist_data **pgdat, unsigned long *token, bool *workingset)
+{
+ return false;
+}
+
static void lru_gen_refault(struct folio *folio, void *shadow)
{
}
@@ -385,42 +413,34 @@ void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg)
}
/**
- * workingset_refault - Evaluate the refault of a previously evicted folio.
- * @folio: The freshly allocated replacement folio.
- * @shadow: Shadow entry of the evicted folio.
- *
- * Calculates and evaluates the refault distance of the previously
- * evicted folio in the context of the node and the memcg whose memory
- * pressure caused the eviction.
+ * workingset_test_recent - tests if the shadow entry is for a folio that was
+ * recently evicted. Also fills in @workingset with the value unpacked from
+ * shadow.
+ * @shadow: the shadow entry to be tested.
+ * @file: whether the corresponding folio is from the file lru.
+ * @workingset: where the workingset value unpacked from shadow should
+ * be stored.
+ *
+ * Return: true if the shadow is for a recently evicted folio; false otherwise.
*/
-void workingset_refault(struct folio *folio, void *shadow)
+bool workingset_test_recent(void *shadow, bool file, bool *workingset)
{
- bool file = folio_is_file_lru(folio);
struct mem_cgroup *eviction_memcg;
struct lruvec *eviction_lruvec;
unsigned long refault_distance;
unsigned long workingset_size;
- struct pglist_data *pgdat;
- struct mem_cgroup *memcg;
- unsigned long eviction;
- struct lruvec *lruvec;
unsigned long refault;
- bool workingset;
int memcgid;
- long nr;
+ struct pglist_data *pgdat;
+ unsigned long eviction;
- if (lru_gen_enabled()) {
- lru_gen_refault(folio, shadow);
- return;
- }
+ if (lru_gen_enabled())
+ return lru_gen_test_recent(shadow, file, &memcgid, &pgdat, &eviction,
+ workingset);
- unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
+ unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset);
eviction <<= bucket_order;
- /* Flush stats (and potentially sleep) before holding RCU read lock */
- mem_cgroup_flush_stats_ratelimited();
-
- rcu_read_lock();
/*
* Look up the memcg associated with the stored ID. It might
* have been deleted since the folio's eviction.
@@ -439,7 +459,8 @@ void workingset_refault(struct folio *folio, void *shadow)
*/
eviction_memcg = mem_cgroup_from_id(memcgid);
if (!mem_cgroup_disabled() && !eviction_memcg)
- goto out;
+ return false;
+
eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
refault = atomic_long_read(&eviction_lruvec->nonresident_age);
@@ -462,20 +483,6 @@ void workingset_refault(struct folio *folio, void *shadow)
refault_distance = (refault - eviction) & EVICTION_MASK;
/*
- * The activation decision for this folio is made at the level
- * where the eviction occurred, as that is where the LRU order
- * during folio reclaim is being determined.
- *
- * However, the cgroup that will own the folio is the one that
- * is actually experiencing the refault event.
- */
- nr = folio_nr_pages(folio);
- memcg = folio_memcg(folio);
- pgdat = folio_pgdat(folio);
- lruvec = mem_cgroup_lruvec(memcg, pgdat);
-
- mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);
- /*
* Compare the distance to the existing workingset size. We
* don't activate pages that couldn't stay resident even if
* all the memory was available to the workingset. Whether
@@ -495,7 +502,54 @@ void workingset_refault(struct folio *folio, void *shadow)
NR_INACTIVE_ANON);
}
}
- if (refault_distance > workingset_size)
+
+ return refault_distance <= workingset_size;
+}
+
+/**
+ * workingset_refault - Evaluate the refault of a previously evicted folio.
+ * @folio: The freshly allocated replacement folio.
+ * @shadow: Shadow entry of the evicted folio.
+ *
+ * Calculates and evaluates the refault distance of the previously
+ * evicted folio in the context of the node and the memcg whose memory
+ * pressure caused the eviction.
+ */
+void workingset_refault(struct folio *folio, void *shadow)
+{
+ bool file = folio_is_file_lru(folio);
+ struct pglist_data *pgdat;
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
+ bool workingset;
+ long nr;
+
+ if (lru_gen_enabled()) {
+ lru_gen_refault(folio, shadow);
+ return;
+ }
+
+ /* Flush stats (and potentially sleep) before holding RCU read lock */
+ mem_cgroup_flush_stats_ratelimited();
+
+ rcu_read_lock();
+
+ /*
+ * The activation decision for this folio is made at the level
+ * where the eviction occurred, as that is where the LRU order
+ * during folio reclaim is being determined.
+ *
+ * However, the cgroup that will own the folio is the one that
+ * is actually experiencing the refault event.
+ */
+ nr = folio_nr_pages(folio);
+ memcg = folio_memcg(folio);
+ pgdat = folio_pgdat(folio);
+ lruvec = mem_cgroup_lruvec(memcg, pgdat);
+
+ mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);
+
+ if (!workingset_test_recent(shadow, file, &workingset))
goto out;
folio_set_active(folio);
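
workingset_refault() is now a thin wrapper: it accounts the refault event
against the folio's own cgroup and node, then delegates the recency decision
to workingset_test_recent(), which is side-effect free and therefore usable
by other callers — the cachestat work, for instance, appears designed around
exactly this predicate. A hedged sketch of an outside consumer (the wrapper
name is illustrative):

	/* Sketch: ask "was this shadow entry evicted recently?" without
	 * the statistics updates that workingset_refault() performs. */
	static bool shadow_is_recent(void *shadow, bool file)
	{
		bool workingset;

		return shadow && workingset_test_recent(shadow, file, &workingset);
	}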
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 44ddaf5d601e..02f7f414aade 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1331,31 +1331,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
obj_to_location(obj, &page, &obj_idx);
zspage = get_zspage(page);
-#ifdef CONFIG_ZPOOL
- /*
- * Move the zspage to front of pool's LRU.
- *
- * Note that this is swap-specific, so by definition there are no ongoing
- * accesses to the memory while the page is swapped out that would make
- * it "hot". A new entry is hot, then ages to the tail until it gets either
- * written back or swaps back in.
- *
- * Furthermore, map is also called during writeback. We must not put an
- * isolated page on the LRU mid-reclaim.
- *
- * As a result, only update the LRU when the page is mapped for write
- * when it's first instantiated.
- *
- * This is a deviation from the other backends, which perform this update
- * in the allocation function (zbud_alloc, z3fold_alloc).
- */
- if (mm == ZS_MM_WO) {
- if (!list_empty(&zspage->lru))
- list_del(&zspage->lru);
- list_add(&zspage->lru, &pool->lru);
- }
-#endif
-
/*
* migration cannot move any zpages in this zspage. Here, pool->lock
* is too heavy since callers would take some time until they calls
@@ -1525,9 +1500,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
fix_fullness_group(class, zspage);
record_obj(handle, obj);
class_stat_inc(class, ZS_OBJS_INUSE, 1);
- spin_unlock(&pool->lock);
- return handle;
+ goto out;
}
spin_unlock(&pool->lock);
@@ -1550,6 +1524,14 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
/* We completely set up zspage so mark them as movable */
SetZsPageMovable(pool, zspage);
+out:
+#ifdef CONFIG_ZPOOL
+ /* Add/move zspage to beginning of LRU */
+ if (!list_empty(&zspage->lru))
+ list_del(&zspage->lru);
+ list_add(&zspage->lru, &pool->lru);
+#endif
+
spin_unlock(&pool->lock);
return handle;
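
Relocating the LRU update from zs_map_object() into zs_malloc() removes the
ZS_MM_WO special case described in the deleted comment, and brings zsmalloc
in line with zbud/z3fold, which the comment itself called out as performing
this update in their allocation functions. Both the fast path (object fits
an existing zspage) and the slow path now funnel through one rotate-to-front
while pool->lock is already held:

	out:
	#ifdef CONFIG_ZPOOL
		/* Add/move zspage to beginning of LRU */
		if (!list_empty(&zspage->lru))
			list_del(&zspage->lru);
		list_add(&zspage->lru, &pool->lru);
	#endif
		spin_unlock(&pool->lock);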
diff --git a/mm/zswap.c b/mm/zswap.c
index e1e621d0b6a0..59da2a415fbb 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1020,6 +1020,22 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
goto fail;
case ZSWAP_SWAPCACHE_NEW: /* page is locked */
+ /*
+ * Having a local reference to the zswap entry doesn't exclude
+ * swapping from invalidating and recycling the swap slot. Once
+ * the swapcache is secured against concurrent swapping to and
+ * from the slot, recheck that the entry is still current before
+ * writing.
+ */
+ spin_lock(&tree->lock);
+ if (zswap_rb_search(&tree->rbroot, entry->offset) != entry) {
+ spin_unlock(&tree->lock);
+ delete_from_swap_cache(page_folio(page));
+ ret = -ENOMEM;
+ goto fail;
+ }
+ spin_unlock(&tree->lock);
+
/* decompress */
acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
dlen = PAGE_SIZE;
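
The ZSWAP_SWAPCACHE_NEW block now revalidates the entry before decompressing:
holding a local reference to a zswap entry does not pin the swap slot, so by
the time the swapcache page is secured the slot may have been invalidated and
recycled for different data. The idiom is the classic lookup-then-recheck
under the lock that serializes invalidation:

	/* Sketch of the recheck pattern added above. */
	spin_lock(&tree->lock);
	if (zswap_rb_search(&tree->rbroot, entry->offset) != entry) {
		/* Raced with invalidation: drop the stale swapcache
		 * page and report failure so the caller backs out. */
		spin_unlock(&tree->lock);
		delete_from_swap_cache(page_folio(page));
		ret = -ENOMEM;
		goto fail;
	}
	spin_unlock(&tree->lock);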
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 640b951bf40a..f75ef12f18f7 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1083,8 +1083,28 @@ static void hci_conn_unlink(struct hci_conn *conn)
if (!conn->parent) {
struct hci_link *link, *t;
- list_for_each_entry_safe(link, t, &conn->link_list, list)
- hci_conn_unlink(link->conn);
+ list_for_each_entry_safe(link, t, &conn->link_list, list) {
+ struct hci_conn *child = link->conn;
+
+ hci_conn_unlink(child);
+
+ /* If hdev is down it means
+ * hci_dev_close_sync/hci_conn_hash_flush is in progress
+ * and the links don't need to be cleaned up here, as all
+ * connections will be cleaned up anyway.
+ */
+ if (!test_bit(HCI_UP, &hdev->flags))
+ continue;
+
+ /* Due to race, SCO connection might be not established
+ * yet at this point. Delete it now, otherwise it is
+ * possible for it to be stuck and can't be deleted.
+ */
+ if ((child->type == SCO_LINK ||
+ child->type == ESCO_LINK) &&
+ child->handle == HCI_CONN_HANDLE_UNSET)
+ hci_conn_del(child);
+ }
return;
}
@@ -1092,35 +1112,30 @@ static void hci_conn_unlink(struct hci_conn *conn)
if (!conn->link)
return;
- hci_conn_put(conn->parent);
- conn->parent = NULL;
-
list_del_rcu(&conn->link->list);
synchronize_rcu();
+ hci_conn_drop(conn->parent);
+ hci_conn_put(conn->parent);
+ conn->parent = NULL;
+
kfree(conn->link);
conn->link = NULL;
-
- /* Due to race, SCO connection might be not established
- * yet at this point. Delete it now, otherwise it is
- * possible for it to be stuck and can't be deleted.
- */
- if (conn->handle == HCI_CONN_HANDLE_UNSET)
- hci_conn_del(conn);
}
-int hci_conn_del(struct hci_conn *conn)
+void hci_conn_del(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
+ hci_conn_unlink(conn);
+
cancel_delayed_work_sync(&conn->disc_work);
cancel_delayed_work_sync(&conn->auto_accept_work);
cancel_delayed_work_sync(&conn->idle_work);
if (conn->type == ACL_LINK) {
- hci_conn_unlink(conn);
/* Unacked frames */
hdev->acl_cnt += conn->sent;
} else if (conn->type == LE_LINK) {
@@ -1131,13 +1146,6 @@ int hci_conn_del(struct hci_conn *conn)
else
hdev->acl_cnt += conn->sent;
} else {
- struct hci_conn *acl = conn->parent;
-
- if (acl) {
- hci_conn_unlink(conn);
- hci_conn_drop(acl);
- }
-
/* Unacked ISO frames */
if (conn->type == ISO_LINK) {
if (hdev->iso_pkts)
@@ -1160,8 +1168,6 @@ int hci_conn_del(struct hci_conn *conn)
* rest of hci_conn_del.
*/
hci_conn_cleanup(conn);
-
- return 0;
}
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
@@ -2462,22 +2468,21 @@ timer:
/* Drop all connection on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
- struct hci_conn_hash *h = &hdev->conn_hash;
- struct hci_conn *c, *n;
+ struct list_head *head = &hdev->conn_hash.list;
+ struct hci_conn *conn;
BT_DBG("hdev %s", hdev->name);
- list_for_each_entry_safe(c, n, &h->list, list) {
- c->state = BT_CLOSED;
-
- hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
-
- /* Unlink before deleting otherwise it is possible that
- * hci_conn_del removes the link which may cause the list to
- * contain items already freed.
- */
- hci_conn_unlink(c);
- hci_conn_del(c);
+ /* We should not traverse the list here, because hci_conn_del
+ * can remove extra links, which may cause the list traversal
+ * to hit items that have already been released.
+ */
+ while ((conn = list_first_entry_or_null(head,
+ struct hci_conn,
+ list)) != NULL) {
+ conn->state = BT_CLOSED;
+ hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
+ hci_conn_del(conn);
}
}
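
The rewritten hci_conn_hash_flush() is the generalizable part of this fix:
once the per-item destructor (here hci_conn_del(), which now unlinks child
connections itself) can delete other list entries, list_for_each_entry_safe()
is no longer safe, because its prefetched next pointer may already have been
freed. Draining from the head avoids caching any pointer across the
destructor. A minimal sketch of the idiom with illustrative types:

	struct item {
		struct list_head list;
	};

	static void drain_all(struct list_head *head)
	{
		struct item *it;

		/* Re-read the head each round; destroy_item() may unlink
		 * any number of entries, not just 'it'. */
		while ((it = list_first_entry_or_null(head, struct item, list)))
			destroy_item(it);	/* hypothetical; must unlink 'it' */
	}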
diff --git a/net/core/filter.c b/net/core/filter.c
index d9ce04ca22ce..451b0ec7f242 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -6916,6 +6916,8 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
FIELD)); \
} while (0)
+ BTF_TYPE_EMIT(struct bpf_tcp_sock);
+
switch (si->off) {
case offsetof(struct bpf_tcp_sock, rtt_min):
BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 26a586007d8b..515ec5cdc79c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5298,7 +5298,7 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
u32 csum_start = skb_headroom(skb) + (u32)start;
- if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
+ if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) {
net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
start, off, skb_headroom(skb), skb_headlen(skb));
return false;
@@ -5306,7 +5306,7 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = csum_start;
skb->csum_offset = off;
- skb_set_transport_header(skb, start);
+ skb->transport_header = csum_start;
return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);
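
Two details in the skb_partial_csum_set() hunks above: csum_start must now be
strictly below U16_MAX because skb->csum_start and skb->transport_header are
both 16-bit offsets and (u16)~0 doubles as the "transport header not set"
sentinel; and the transport header is stored directly from csum_start, which
is already the same head-relative offset that skb_set_transport_header()
would have derived from the current data pointer. The sentinel test lives in
skbuff.h:

	static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
	{
		return skb->transport_header != (typeof(skb->transport_header))~0U;
	}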
diff --git a/net/ieee802154/header_ops.c b/net/ieee802154/header_ops.c
index 35d384dfe29d..41a556be1017 100644
--- a/net/ieee802154/header_ops.c
+++ b/net/ieee802154/header_ops.c
@@ -120,6 +120,29 @@ ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr)
}
EXPORT_SYMBOL_GPL(ieee802154_hdr_push);
+int ieee802154_mac_cmd_push(struct sk_buff *skb, void *f,
+ const void *pl, unsigned int pl_len)
+{
+ struct ieee802154_mac_cmd_frame *frame = f;
+ struct ieee802154_mac_cmd_pl *mac_pl = &frame->mac_pl;
+ struct ieee802154_hdr *mhr = &frame->mhr;
+ int ret;
+
+ skb_reserve(skb, sizeof(*mhr));
+ ret = ieee802154_hdr_push(skb, mhr);
+ if (ret < 0)
+ return ret;
+
+ skb_reset_mac_header(skb);
+ skb->mac_len = ret;
+
+ skb_put_data(skb, mac_pl, sizeof(*mac_pl));
+ skb_put_data(skb, pl, pl_len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ieee802154_mac_cmd_push);
+
int ieee802154_beacon_push(struct sk_buff *skb,
struct ieee802154_beacon_frame *beacon)
{
@@ -284,6 +307,19 @@ ieee802154_hdr_pull(struct sk_buff *skb, struct ieee802154_hdr *hdr)
}
EXPORT_SYMBOL_GPL(ieee802154_hdr_pull);
+int ieee802154_mac_cmd_pl_pull(struct sk_buff *skb,
+ struct ieee802154_mac_cmd_pl *mac_pl)
+{
+ if (!pskb_may_pull(skb, sizeof(*mac_pl)))
+ return -EINVAL;
+
+ memcpy(mac_pl, skb->data, sizeof(*mac_pl));
+ skb_pull(skb, sizeof(*mac_pl));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ieee802154_mac_cmd_pl_pull);
+
int
ieee802154_hdr_peek_addrs(const struct sk_buff *skb, struct ieee802154_hdr *hdr)
{
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index 832e3c50816c..d610c1886160 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -233,7 +233,7 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
NLA_POLICY_RANGE(NLA_U8, NL802154_SCAN_DONE_REASON_FINISHED,
NL802154_SCAN_DONE_REASON_ABORTED),
[NL802154_ATTR_BEACON_INTERVAL] =
- NLA_POLICY_MAX(NLA_U8, IEEE802154_MAX_SCAN_DURATION),
+ NLA_POLICY_MAX(NLA_U8, IEEE802154_ACTIVE_SCAN_DURATION),
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
[NL802154_ATTR_SEC_ENABLED] = { .type = NLA_U8, },
@@ -1417,6 +1417,11 @@ static int nl802154_trigger_scan(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
+ if (wpan_phy->flags & WPAN_PHY_FLAG_DATAGRAMS_ONLY) {
+ NL_SET_ERR_MSG(info->extack, "PHY only supports datagrams");
+ return -EOPNOTSUPP;
+ }
+
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (!request)
return -ENOMEM;
@@ -1426,6 +1431,7 @@ static int nl802154_trigger_scan(struct sk_buff *skb, struct genl_info *info)
type = nla_get_u8(info->attrs[NL802154_ATTR_SCAN_TYPE]);
switch (type) {
+ case NL802154_SCAN_ACTIVE:
case NL802154_SCAN_PASSIVE:
request->type = type;
break;
@@ -1583,6 +1589,11 @@ nl802154_send_beacons(struct sk_buff *skb, struct genl_info *info)
return -EPERM;
}
+ if (wpan_phy->flags & WPAN_PHY_FLAG_DATAGRAMS_ONLY) {
+ NL_SET_ERR_MSG(info->extack, "PHY only supports datagrams");
+ return -EOPNOTSUPP;
+ }
+
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (!request)
return -ENOMEM;
diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h
index 63bab99ed368..c347ec9ff8c9 100644
--- a/net/mac802154/ieee802154_i.h
+++ b/net/mac802154/ieee802154_i.h
@@ -58,6 +58,7 @@ struct ieee802154_local {
/* Scanning */
u8 scan_page;
u8 scan_channel;
+ struct ieee802154_beacon_req_frame scan_beacon_req;
struct cfg802154_scan_request __rcu *scan_req;
struct delayed_work scan_work;
@@ -70,6 +71,8 @@ struct ieee802154_local {
/* Asynchronous tasks */
struct list_head rx_beacon_list;
struct work_struct rx_beacon_work;
+ struct list_head rx_mac_cmd_list;
+ struct work_struct rx_mac_cmd_work;
bool started;
bool suspended;
@@ -154,6 +157,22 @@ ieee802154_sdata_running(struct ieee802154_sub_if_data *sdata)
return test_bit(SDATA_STATE_RUNNING, &sdata->state);
}
+static inline int ieee802154_get_mac_cmd(struct sk_buff *skb, u8 *mac_cmd)
+{
+ struct ieee802154_mac_cmd_pl mac_pl;
+ int ret;
+
+ if (mac_cb(skb)->type != IEEE802154_FC_TYPE_MAC_CMD)
+ return -EINVAL;
+
+ ret = ieee802154_mac_cmd_pl_pull(skb, &mac_pl);
+ if (ret)
+ return ret;
+
+ *mac_cmd = mac_pl.cmd_id;
+ return 0;
+}
+
extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb);
@@ -275,6 +294,8 @@ static inline bool mac802154_is_beaconing(struct ieee802154_local *local)
return test_bit(IEEE802154_IS_BEACONING, &local->ongoing);
}
+void mac802154_rx_mac_cmd_worker(struct work_struct *work);
+
/* interface handling */
int ieee802154_iface_init(void);
void ieee802154_iface_exit(void);
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
index ee23e234b998..357ece67432b 100644
--- a/net/mac802154/main.c
+++ b/net/mac802154/main.c
@@ -90,6 +90,7 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
INIT_LIST_HEAD(&local->interfaces);
INIT_LIST_HEAD(&local->rx_beacon_list);
+ INIT_LIST_HEAD(&local->rx_mac_cmd_list);
mutex_init(&local->iflist_mtx);
tasklet_setup(&local->tasklet, ieee802154_tasklet_handler);
@@ -100,6 +101,7 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
INIT_DELAYED_WORK(&local->scan_work, mac802154_scan_worker);
INIT_WORK(&local->rx_beacon_work, mac802154_rx_beacon_worker);
INIT_DELAYED_WORK(&local->beacon_work, mac802154_beacon_worker);
+ INIT_WORK(&local->rx_mac_cmd_work, mac802154_rx_mac_cmd_worker);
/* init supported flags with 802.15.4 default ranges */
phy->supported.max_minbe = 8;
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index da0628ee3c89..e2434b4fe514 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -47,6 +47,62 @@ void mac802154_rx_beacon_worker(struct work_struct *work)
kfree(mac_pkt);
}
+static bool mac802154_should_answer_beacon_req(struct ieee802154_local *local)
+{
+ struct cfg802154_beacon_request *beacon_req;
+ unsigned int interval;
+
+ rcu_read_lock();
+ beacon_req = rcu_dereference(local->beacon_req);
+ if (!beacon_req) {
+ rcu_read_unlock();
+ return false;
+ }
+
+ interval = beacon_req->interval;
+ rcu_read_unlock();
+
+ if (!mac802154_is_beaconing(local))
+ return false;
+
+ return interval == IEEE802154_ACTIVE_SCAN_DURATION;
+}
+
+void mac802154_rx_mac_cmd_worker(struct work_struct *work)
+{
+ struct ieee802154_local *local =
+ container_of(work, struct ieee802154_local, rx_mac_cmd_work);
+ struct cfg802154_mac_pkt *mac_pkt;
+ u8 mac_cmd;
+ int rc;
+
+ mac_pkt = list_first_entry_or_null(&local->rx_mac_cmd_list,
+ struct cfg802154_mac_pkt, node);
+ if (!mac_pkt)
+ return;
+
+ rc = ieee802154_get_mac_cmd(mac_pkt->skb, &mac_cmd);
+ if (rc)
+ goto out;
+
+ switch (mac_cmd) {
+ case IEEE802154_CMD_BEACON_REQ:
+ dev_dbg(&mac_pkt->sdata->dev->dev, "processing BEACON REQ\n");
+ if (!mac802154_should_answer_beacon_req(local))
+ break;
+
+ queue_delayed_work(local->mac_wq, &local->beacon_work, 0);
+ break;
+ default:
+ break;
+ }
+
+out:
+ list_del(&mac_pkt->node);
+ kfree_skb(mac_pkt->skb);
+ kfree(mac_pkt);
+}
+
static int
ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
struct sk_buff *skb, const struct ieee802154_hdr *hdr)
@@ -140,8 +196,20 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
list_add_tail(&mac_pkt->node, &sdata->local->rx_beacon_list);
queue_work(sdata->local->mac_wq, &sdata->local->rx_beacon_work);
return NET_RX_SUCCESS;
- case IEEE802154_FC_TYPE_ACK:
+
case IEEE802154_FC_TYPE_MAC_CMD:
+ dev_dbg(&sdata->dev->dev, "MAC COMMAND received\n");
+ mac_pkt = kzalloc(sizeof(*mac_pkt), GFP_ATOMIC);
+ if (!mac_pkt)
+ goto fail;
+
+ mac_pkt->skb = skb_get(skb);
+ mac_pkt->sdata = sdata;
+ list_add_tail(&mac_pkt->node, &sdata->local->rx_mac_cmd_list);
+ queue_work(sdata->local->mac_wq, &sdata->local->rx_mac_cmd_work);
+ return NET_RX_SUCCESS;
+
+ case IEEE802154_FC_TYPE_ACK:
goto fail;
case IEEE802154_FC_TYPE_DATA:
diff --git a/net/mac802154/scan.c b/net/mac802154/scan.c
index 5c191bedd72c..d9658f2c4ae6 100644
--- a/net/mac802154/scan.c
+++ b/net/mac802154/scan.c
@@ -18,8 +18,12 @@
#define IEEE802154_BEACON_MHR_SZ 13
#define IEEE802154_BEACON_PL_SZ 4
+#define IEEE802154_MAC_CMD_MHR_SZ 23
+#define IEEE802154_MAC_CMD_PL_SZ 1
#define IEEE802154_BEACON_SKB_SZ (IEEE802154_BEACON_MHR_SZ + \
IEEE802154_BEACON_PL_SZ)
+#define IEEE802154_MAC_CMD_SKB_SZ (IEEE802154_MAC_CMD_MHR_SZ + \
+ IEEE802154_MAC_CMD_PL_SZ)
/* mac802154_scan_cleanup_locked() must be called upon scan completion or abort.
* - Completions are asynchronous, not locked by the rtnl and decided by the
@@ -131,6 +135,42 @@ static int mac802154_scan_find_next_chan(struct ieee802154_local *local,
return 0;
}
+static int mac802154_scan_prepare_beacon_req(struct ieee802154_local *local)
+{
+ memset(&local->scan_beacon_req, 0, sizeof(local->scan_beacon_req));
+ local->scan_beacon_req.mhr.fc.type = IEEE802154_FC_TYPE_MAC_CMD;
+ local->scan_beacon_req.mhr.fc.dest_addr_mode = IEEE802154_SHORT_ADDRESSING;
+ local->scan_beacon_req.mhr.fc.version = IEEE802154_2003_STD;
+ local->scan_beacon_req.mhr.fc.source_addr_mode = IEEE802154_NO_ADDRESSING;
+ local->scan_beacon_req.mhr.dest.mode = IEEE802154_ADDR_SHORT;
+ local->scan_beacon_req.mhr.dest.pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
+ local->scan_beacon_req.mhr.dest.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
+ local->scan_beacon_req.mac_pl.cmd_id = IEEE802154_CMD_BEACON_REQ;
+
+ return 0;
+}
+
+static int mac802154_transmit_beacon_req(struct ieee802154_local *local,
+ struct ieee802154_sub_if_data *sdata)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = alloc_skb(IEEE802154_MAC_CMD_SKB_SZ, GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ skb->dev = sdata->dev;
+
+ ret = ieee802154_mac_cmd_push(skb, &local->scan_beacon_req, NULL, 0);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ return ieee802154_mlme_tx(local, sdata, skb);
+}
+
void mac802154_scan_worker(struct work_struct *work)
{
struct ieee802154_local *local =
@@ -206,6 +246,13 @@ void mac802154_scan_worker(struct work_struct *work)
goto end_scan;
}
+ if (scan_req->type == NL802154_SCAN_ACTIVE) {
+ ret = mac802154_transmit_beacon_req(local, sdata);
+ if (ret)
+ dev_err(&sdata->dev->dev,
+ "Error when transmitting beacon request (%d)\n", ret);
+ }
+
ieee802154_configure_durations(wpan_phy, page, channel);
scan_duration = mac802154_scan_get_channel_time(scan_req_duration,
wpan_phy->symbol_duration);
@@ -231,8 +278,8 @@ int mac802154_trigger_scan_locked(struct ieee802154_sub_if_data *sdata,
if (mac802154_is_scanning(local))
return -EBUSY;
- /* TODO: support other scanning type */
- if (request->type != NL802154_SCAN_PASSIVE)
+ if (request->type != NL802154_SCAN_PASSIVE &&
+ request->type != NL802154_SCAN_ACTIVE)
return -EOPNOTSUPP;
/* Store scanning parameters */
@@ -247,6 +294,8 @@ int mac802154_trigger_scan_locked(struct ieee802154_sub_if_data *sdata,
local->scan_page = request->page;
local->scan_channel = -1;
set_bit(IEEE802154_IS_SCANNING, &local->ongoing);
+ if (request->type == NL802154_SCAN_ACTIVE)
+ mac802154_scan_prepare_beacon_req(local);
nl802154_scan_started(request->wpan_phy, request->wpan_dev);
@@ -354,6 +403,7 @@ void mac802154_beacon_worker(struct work_struct *work)
struct cfg802154_beacon_request *beacon_req;
struct ieee802154_sub_if_data *sdata;
struct wpan_dev *wpan_dev;
+ u8 interval;
int ret;
rcu_read_lock();
@@ -374,6 +424,7 @@ void mac802154_beacon_worker(struct work_struct *work)
}
wpan_dev = beacon_req->wpan_dev;
+ interval = beacon_req->interval;
rcu_read_unlock();
@@ -383,8 +434,9 @@ void mac802154_beacon_worker(struct work_struct *work)
dev_err(&sdata->dev->dev,
"Beacon could not be transmitted (%d)\n", ret);
- queue_delayed_work(local->mac_wq, &local->beacon_work,
- local->beacon_interval);
+ if (interval < IEEE802154_ACTIVE_SCAN_DURATION)
+ queue_delayed_work(local->mac_wq, &local->beacon_work,
+ local->beacon_interval);
}
int mac802154_stop_beacons_locked(struct ieee802154_local *local,
@@ -439,13 +491,17 @@ int mac802154_send_beacons_locked(struct ieee802154_sub_if_data *sdata,
local->beacon.mhr.source.pan_id = request->wpan_dev->pan_id;
local->beacon.mhr.source.extended_addr = request->wpan_dev->extended_addr;
local->beacon.mac_pl.beacon_order = request->interval;
- local->beacon.mac_pl.superframe_order = request->interval;
+ if (request->interval <= IEEE802154_MAX_SCAN_DURATION)
+ local->beacon.mac_pl.superframe_order = request->interval;
local->beacon.mac_pl.final_cap_slot = 0xf;
local->beacon.mac_pl.battery_life_ext = 0;
- /* TODO: Fill this field depending on the coordinator capacity */
+ /* TODO: Fill this field with the coordinator situation in the network */
local->beacon.mac_pl.pan_coordinator = 1;
local->beacon.mac_pl.assoc_permit = 1;
+ if (request->interval == IEEE802154_ACTIVE_SCAN_DURATION)
+ return 0;
+
/* Start the beacon work */
local->beacon_interval =
mac802154_scan_get_channel_time(request->interval,
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 212c5d57465a..9734e1d9f991 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -639,6 +639,16 @@ gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf,
ret = write_bytes_to_xdr_buf(buf, offset, data, len);
+#if IS_ENABLED(CONFIG_KUNIT)
+ /*
+ * CBC-CTS does not define an output IV but RFC 3962 defines it as the
+ * penultimate block of ciphertext, so copy that into the IV buffer
+ * before returning.
+ */
+ if (encrypt)
+ memcpy(iv, data, crypto_sync_skcipher_ivsize(cipher));
+#endif
+
out:
kfree(data);
return ret;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 26367cf4c17a..43e32129a583 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1416,7 +1416,7 @@ err_bad_rpc:
/* Only RPCv2 supported */
xdr_stream_encode_u32(xdr, RPC_VERSION);
xdr_stream_encode_u32(xdr, RPC_VERSION);
- goto sendit;
+ return 1; /* don't wrap */
err_bad_auth:
dprintk("svc: authentication failed (%d)\n",
@@ -1432,7 +1432,7 @@ err_bad_auth:
err_bad_prog:
dprintk("svc: unknown program %d\n", rqstp->rq_prog);
serv->sv_stats->rpcbadfmt++;
- xdr_stream_encode_u32(xdr, RPC_PROG_UNAVAIL);
+ *rqstp->rq_accept_statp = rpc_prog_unavail;
goto sendit;
err_bad_vers:
@@ -1440,7 +1440,12 @@ err_bad_vers:
rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);
serv->sv_stats->rpcbadfmt++;
- xdr_stream_encode_u32(xdr, RPC_PROG_MISMATCH);
+ *rqstp->rq_accept_statp = rpc_prog_mismatch;
+
+ /*
+ * svc_authenticate() has already added the verifier and
+ * advanced the stream just past rq_accept_statp.
+ */
xdr_stream_encode_u32(xdr, process.mismatch.lovers);
xdr_stream_encode_u32(xdr, process.mismatch.hivers);
goto sendit;
@@ -1449,19 +1454,19 @@ err_bad_proc:
svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);
serv->sv_stats->rpcbadfmt++;
- xdr_stream_encode_u32(xdr, RPC_PROC_UNAVAIL);
+ *rqstp->rq_accept_statp = rpc_proc_unavail;
goto sendit;
err_garbage_args:
svc_printk(rqstp, "failed to decode RPC header\n");
serv->sv_stats->rpcbadfmt++;
- xdr_stream_encode_u32(xdr, RPC_GARBAGE_ARGS);
+ *rqstp->rq_accept_statp = rpc_garbage_args;
goto sendit;
err_system_err:
serv->sv_stats->rpcbadfmt++;
- xdr_stream_encode_u32(xdr, RPC_SYSTEM_ERR);
+ *rqstp->rq_accept_statp = rpc_system_err;
goto sendit;
}
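
The svc_process() error paths above stop open-coding the accept status into
the XDR stream. In the accepted-reply layout, the verifier is followed by a
single accept_stat word; by the time these labels run, svc_authenticate()
has already encoded the verifier and recorded where accept_stat lives, so an
error path only needs to overwrite that slot:

	/* Sketch: rq_accept_statp points into the already-reserved reply
	 * buffer; the stream itself is not rewound or re-encoded. */
	*rqstp->rq_accept_statp = rpc_proc_unavail;
	goto sendit;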
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a51c9b989d58..9989194446a5 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -895,6 +895,9 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
trace_svcsock_accept_err(xprt, serv->sv_name, err);
return NULL;
}
+ if (IS_ERR(sock_alloc_file(newsock, O_NONBLOCK, NULL)))
+ return NULL;
+
set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
err = kernel_getpeername(newsock, sin);
@@ -935,7 +938,7 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
return &newsvsk->sk_xprt;
failed:
- sock_release(newsock);
+ sockfd_put(newsock);
return NULL;
}
@@ -1430,7 +1433,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
struct socket *sock,
int flags)
{
- struct file *filp = NULL;
struct svc_sock *svsk;
struct sock *inet;
int pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
@@ -1439,14 +1441,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
if (!svsk)
return ERR_PTR(-ENOMEM);
- if (!sock->file) {
- filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
- if (IS_ERR(filp)) {
- kfree(svsk);
- return ERR_CAST(filp);
- }
- }
-
inet = sock->sk;
if (pmap_register) {
@@ -1456,8 +1450,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
inet->sk_protocol,
ntohs(inet_sk(inet)->inet_sport));
if (err < 0) {
- if (filp)
- fput(filp);
kfree(svsk);
return ERR_PTR(err);
}
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index b2df1e0f8153..26f6d304451e 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -350,7 +350,7 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
struct xsk_dma_map *dma_map;
- if (pool->dma_pages_cnt == 0)
+ if (!pool->dma_pages)
return;
dma_map = xp_find_dma_map(pool);
@@ -364,6 +364,7 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
__xp_dma_unmap(dma_map, attrs);
kvfree(pool->dma_pages);
+ pool->dma_pages = NULL;
pool->dma_pages_cnt = 0;
pool->dev = NULL;
}
@@ -503,7 +504,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
if (pool->unaligned) {
xskb = pool->free_heads[--pool->free_heads_cnt];
xp_init_xskb_addr(xskb, pool, addr);
- if (pool->dma_pages_cnt)
+ if (pool->dma_pages)
xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
} else {
xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
@@ -569,7 +570,7 @@ static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xd
if (pool->unaligned) {
xskb = pool->free_heads[--pool->free_heads_cnt];
xp_init_xskb_addr(xskb, pool, addr);
- if (pool->dma_pages_cnt)
+ if (pool->dma_pages)
xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
} else {
xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
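
The xsk_buff_pool fix makes the dma_pages pointer, not dma_pages_cnt, the single
source of truth for "DMA is mapped", and clears it on unmap so a stale count can
never steer an allocation through the DMA path. The invariant, restated compactly
(field names as in the diff):

	kvfree(pool->dma_pages);
	pool->dma_pages = NULL;		/* all checks now test the pointer */
	pool->dma_pages_cnt = 0;

	if (pool->dma_pages)		/* always false after unmap */
		xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);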
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index bef28c6187eb..408f5e55744e 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -378,7 +378,7 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
break;
default:
xdo->dev = NULL;
- dev_put(dev);
+ netdev_put(dev, &xdo->dev_tracker);
NL_SET_ERR_MSG(extack, "Unrecognized offload direction");
return -EINVAL;
}
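
netdev_put() must be paired with the tracker registered when the reference was
taken; with CONFIG_NET_DEV_REFCNT_TRACKER this attributes leaks to their owner, and
mixing in a plain dev_put() (as the old code did) corrupts the tracking. A minimal
sketch of the paired calls:

	netdev_hold(dev, &xdo->dev_tracker, GFP_ATOMIC);

	/* ... on teardown or error, release through the same tracker ... */
	netdev_put(dev, &xdo->dev_tracker);	/* not plain dev_put(dev) */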
diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
index 35279c220bd7..1f99dc469027 100644
--- a/net/xfrm/xfrm_interface_core.c
+++ b/net/xfrm/xfrm_interface_core.c
@@ -310,52 +310,6 @@ static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
skb->mark = 0;
}
-static int xfrmi_input(struct sk_buff *skb, int nexthdr, __be32 spi,
- int encap_type, unsigned short family)
-{
- struct sec_path *sp;
-
- sp = skb_sec_path(skb);
- if (sp && (sp->len || sp->olen) &&
- !xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
- goto discard;
-
- XFRM_SPI_SKB_CB(skb)->family = family;
- if (family == AF_INET) {
- XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
- XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
- } else {
- XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
- XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
- }
-
- return xfrm_input(skb, nexthdr, spi, encap_type);
-discard:
- kfree_skb(skb);
- return 0;
-}
-
-static int xfrmi4_rcv(struct sk_buff *skb)
-{
- return xfrmi_input(skb, ip_hdr(skb)->protocol, 0, 0, AF_INET);
-}
-
-static int xfrmi6_rcv(struct sk_buff *skb)
-{
- return xfrmi_input(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
- 0, 0, AF_INET6);
-}
-
-static int xfrmi4_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
-{
- return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET);
-}
-
-static int xfrmi6_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
-{
- return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET6);
-}
-
static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
{
const struct xfrm_mode *inner_mode;
@@ -991,8 +945,8 @@ static struct pernet_operations xfrmi_net_ops = {
};
static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
- .handler = xfrmi6_rcv,
- .input_handler = xfrmi6_input,
+ .handler = xfrm6_rcv,
+ .input_handler = xfrm_input,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi6_err,
.priority = 10,
@@ -1042,8 +996,8 @@ static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = {
#endif
static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
- .handler = xfrmi4_rcv,
- .input_handler = xfrmi4_input,
+ .handler = xfrm4_rcv,
+ .input_handler = xfrm_input,
.cb_handler = xfrmi_rcv_cb,
.err_handler = xfrmi4_err,
.priority = 10,
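
With the per-interface wrappers deleted, xfrmi plugs the generic receive and input
handlers straight into its protocol registrations and keeps only the
interface-specific callback. A sketch of the registration shape with placeholder
names (xfrm4_protocol_register() is the real API):

	static struct xfrm4_protocol my_esp4_protocol __read_mostly = {
		.handler	= xfrm4_rcv,	/* generic receive path */
		.input_handler	= xfrm_input,	/* generic input */
		.cb_handler	= my_rcv_cb,	/* interface-specific hook */
		.err_handler	= my_err_handler,
		.priority	= 10,
	};

	err = xfrm4_protocol_register(&my_esp4_protocol, IPPROTO_ESP);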
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 5c61ec04b839..21a3a1cd3d6d 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3712,12 +3712,6 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
}
xfrm_nr = ti;
- if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK &&
- !xfrm_nr) {
- XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
- goto reject;
- }
-
if (npols > 1) {
xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
tpp = stp;
@@ -3745,9 +3739,6 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
goto reject;
}
- if (if_id)
- secpath_reset(skb);
-
xfrm_pols_put(pols, npols);
return 1;
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d720e163ae6e..0e398a589536 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1980,6 +1980,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err) {
xfrm_dev_policy_delete(xp);
+ xfrm_dev_policy_free(xp);
security_xfrm_policy_free(xp->security);
kfree(xp);
return err;
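
The xfrm_add_policy() fix pairs two distinct teardown steps:
xfrm_dev_policy_delete() removes the policy from the offloading device, while
xfrm_dev_policy_free() releases the offload state, including the tracked netdev
reference taken at add time. Omitting the second leaked that reference. The
pairing, as the error path above now reads:

	xfrm_dev_policy_delete(xp);	/* undo the device-side add */
	xfrm_dev_policy_free(xp);	/* drop the tracked dev reference */
	security_xfrm_policy_free(xp->security);
	kfree(xp);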
diff --git a/samples/bpf/tcp_basertt_kern.c b/samples/bpf/tcp_basertt_kern.c
index 8dfe09a92fec..822b0742b815 100644
--- a/samples/bpf/tcp_basertt_kern.c
+++ b/samples/bpf/tcp_basertt_kern.c
@@ -47,7 +47,7 @@ int bpf_basertt(struct bpf_sock_ops *skops)
case BPF_SOCK_OPS_BASE_RTT:
n = bpf_getsockopt(skops, SOL_TCP, TCP_CONGESTION,
cong, sizeof(cong));
- if (!n && !__builtin_memcmp(cong, nv, sizeof(nv)+1)) {
+ if (!n && !__builtin_memcmp(cong, nv, sizeof(nv))) {
/* Set base_rtt to 80us */
rv = 80;
} else if (n) {
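
The samples/bpf change fixes an off-by-one: sizeof() of a string-literal array
already counts the terminating NUL, so comparing sizeof(nv) + 1 bytes read one byte
past both buffers. A worked example (names as in the sample):

	char cong[20] = {};
	const char nv[] = "nv";		/* sizeof(nv) == 3: 'n', 'v', '\0' */

	/* sizeof(nv) bytes already cover the terminator */
	if (!__builtin_memcmp(cong, nv, sizeof(nv)))
		/* congestion control is "nv" */;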
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index f8bd6178d17b..fc7ba95e86a0 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -155,6 +155,7 @@ aquired||acquired
aquisition||acquisition
arbitary||arbitrary
architechture||architecture
+archtecture||architecture
arguement||argument
arguements||arguments
arithmatic||arithmetic
@@ -279,6 +280,7 @@ cant'||can't
canot||cannot
cann't||can't
cannnot||cannot
+capabiity||capability
capabilites||capabilities
capabilties||capabilities
capabilty||capability
@@ -426,6 +428,7 @@ cotrol||control
cound||could
couter||counter
coutner||counter
+creationg||creating
cryptocraphic||cryptographic
cummulative||cumulative
cunter||counter
@@ -492,6 +495,7 @@ destorys||destroys
destroied||destroyed
detabase||database
deteced||detected
+detecion||detection
detectt||detect
detroyed||destroyed
develope||develop
@@ -513,6 +517,7 @@ diferent||different
differrence||difference
diffrent||different
differenciate||differentiate
+diffreential||differential
diffrentiate||differentiate
difinition||definition
digial||digital
@@ -617,6 +622,7 @@ evalute||evaluate
evalutes||evaluates
evalution||evaluation
excecutable||executable
+excceed||exceed
exceded||exceeded
exceds||exceeds
exceeed||exceed
@@ -632,6 +638,7 @@ existant||existent
exixt||exist
exsits||exists
exlcude||exclude
+exlcuding||excluding
exlcusive||exclusive
exlusive||exclusive
exmaple||example
@@ -726,6 +733,8 @@ generiously||generously
genereate||generate
genereted||generated
genric||generic
+gerenal||general
+geting||getting
globel||global
grabing||grabbing
grahical||graphical
@@ -899,6 +908,7 @@ iteraions||iterations
iternations||iterations
itertation||iteration
itslef||itself
+ivalid||invalid
jave||java
jeffies||jiffies
jumpimng||jumping
@@ -977,6 +987,7 @@ microprocesspr||microprocessor
migrateable||migratable
millenium||millennium
milliseonds||milliseconds
+minimim||minimum
minium||minimum
minimam||minimum
minimun||minimum
@@ -1042,6 +1053,7 @@ notifed||notified
notity||notify
nubmer||number
numebr||number
+numer||number
numner||number
nunber||number
obtaion||obtain
@@ -1061,6 +1073,7 @@ offet||offset
offlaod||offload
offloded||offloaded
offseting||offsetting
+oflload||offload
omited||omitted
omiting||omitting
omitt||omit
@@ -1105,6 +1118,7 @@ pakage||package
paket||packet
pallette||palette
paln||plan
+palne||plane
paramameters||parameters
paramaters||parameters
paramater||parameter
@@ -1181,12 +1195,14 @@ previsously||previously
primative||primitive
princliple||principle
priorty||priority
+priting||printing
privilaged||privileged
privilage||privilege
priviledge||privilege
priviledges||privileges
privleges||privileges
probaly||probably
+probabalistic||probabilistic
procceed||proceed
proccesors||processors
procesed||processed
@@ -1460,6 +1476,7 @@ submited||submitted
submition||submission
succeded||succeeded
suceed||succeed
+succesfuly||successfully
succesfully||successfully
succesful||successful
successed||succeeded
@@ -1503,6 +1520,7 @@ symetric||symmetric
synax||syntax
synchonized||synchronized
sychronization||synchronization
+sychronously||synchronously
synchronuously||synchronously
syncronize||synchronize
syncronized||synchronized
@@ -1532,6 +1550,7 @@ threee||three
threshhold||threshold
thresold||threshold
throught||through
+tansition||transition
trackling||tracking
troughput||throughput
trys||tries
@@ -1611,6 +1630,7 @@ unneccessary||unnecessary
unnecesary||unnecessary
unneedingly||unnecessarily
unnsupported||unsupported
+unuspported||unsupported
unmached||unmatched
unprecise||imprecise
unpriviledged||unprivileged
@@ -1657,6 +1677,7 @@ verfication||verification
veriosn||version
verisons||versions
verison||version
+veritical||vertical
verson||version
vicefersa||vice-versa
virtal||virtual
@@ -1677,6 +1698,7 @@ whenver||whenever
wheter||whether
whe||when
wierd||weird
+wihout||without
wiil||will
wirte||write
withing||within
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c
index b498ed302461..6724e2ff6da8 100644
--- a/security/apparmor/crypto.c
+++ b/security/apparmor/crypto.c
@@ -28,15 +28,15 @@ unsigned int aa_hash_size(void)
char *aa_calc_hash(void *data, size_t len)
{
SHASH_DESC_ON_STACK(desc, apparmor_tfm);
- char *hash = NULL;
- int error = -ENOMEM;
+ char *hash;
+ int error;
if (!apparmor_tfm)
return NULL;
hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
if (!hash)
- goto fail;
+ return ERR_PTR(-ENOMEM);
desc->tfm = apparmor_tfm;
@@ -62,7 +62,7 @@ int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
size_t len)
{
SHASH_DESC_ON_STACK(desc, apparmor_tfm);
- int error = -ENOMEM;
+ int error;
__le32 le32_version = cpu_to_le32(version);
if (!aa_g_hash_policy)
@@ -73,7 +73,7 @@ int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
if (!profile->hash)
- goto fail;
+ return -ENOMEM;
desc->tfm = apparmor_tfm;
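
aa_calc_hash() now distinguishes "no tfm configured" (NULL) from allocation failure
(ERR_PTR(-ENOMEM)), so callers are expected to follow the usual IS_ERR()
convention. A hedged sketch of the caller side (the exact caller behavior is
assumed):

	char *hash = aa_calc_hash(data, len);

	if (IS_ERR(hash))
		return PTR_ERR(hash);	/* e.g. -ENOMEM */
	if (!hash)
		return 0;		/* no tfm: hashing unavailable */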
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index f3715cda59c5..68598557aef5 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -1446,7 +1446,7 @@ check:
}
/* full transition will be built in exec path */
- error = aa_set_current_onexec(target, stack);
+ aa_set_current_onexec(target, stack);
}
audit:
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index 9119ddda6217..698b124e649f 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -161,6 +161,7 @@ static int path_name(const char *op, struct aa_label *label,
return 0;
}
+struct aa_perms default_perms = {};
/**
* aa_lookup_fperms - convert dfa compressed perms to internal perms
* @dfa: dfa to lookup perms for (NOT NULL)
@@ -171,7 +172,6 @@ static int path_name(const char *op, struct aa_label *label,
*
* Returns: a pointer to a file permission set
*/
-struct aa_perms default_perms = {};
struct aa_perms *aa_lookup_fperms(struct aa_policydb *file_rules,
aa_state_t state, struct path_cond *cond)
{
diff --git a/security/apparmor/include/task.h b/security/apparmor/include/task.h
index 13437d62c70f..01717fe432c3 100644
--- a/security/apparmor/include/task.h
+++ b/security/apparmor/include/task.h
@@ -30,7 +30,7 @@ struct aa_task_ctx {
};
int aa_replace_current_label(struct aa_label *label);
-int aa_set_current_onexec(struct aa_label *label, bool stack);
+void aa_set_current_onexec(struct aa_label *label, bool stack);
int aa_set_current_hat(struct aa_label *label, u64 token);
int aa_restore_previous_label(u64 cookie);
struct aa_label *aa_get_task_label(struct task_struct *task);
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 51e8184e0fec..a8fcc7291a75 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -430,11 +430,9 @@ static struct aa_policy *__lookup_parent(struct aa_ns *ns,
* @hname: hierarchical profile name to find parent of (NOT NULL)
* @gfp: type of allocation.
*
- * Returns: NULL on error, parent profile on success
- *
* Requires: ns mutex lock held
*
- * Returns: unrefcounted parent policy or NULL if error creating
+ * Return: unrefcounted parent policy on success or %NULL if error creating
* place holder profiles.
*/
static struct aa_policy *__create_missing_ancestors(struct aa_ns *ns,
@@ -828,7 +826,7 @@ bool aa_current_policy_admin_capable(struct aa_ns *ns)
/**
* aa_may_manage_policy - can the current task manage policy
* @label: label to check if it can manage policy
- * @op: the policy manipulation operation being done
+ * @mask: contains the policy manipulation operation being done
*
* Returns: 0 if the task is allowed to manipulate policy else error
*/
@@ -883,7 +881,6 @@ static struct aa_profile *__list_lookup_parent(struct list_head *lh,
* __replace_profile - replace @old with @new on a list
* @old: profile to be replaced (NOT NULL)
* @new: profile to replace @old with (NOT NULL)
- * @share_proxy: transfer @old->proxy to @new
*
* Will duplicate and refcount elements that @new inherits from @old
* and will inherit @old children.
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index cf2ceec40b28..dafff0dfda8f 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -448,7 +448,7 @@ static struct aa_dfa *unpack_dfa(struct aa_ext *e, int flags)
/**
* unpack_trans_table - unpack a profile transition table
* @e: serialized data extent information (NOT NULL)
- * @table: str table to unpack to (NOT NULL)
+ * @strs: str table to unpack to (NOT NULL)
*
* Returns: true if table successfully unpacked or not present
*/
@@ -1046,8 +1046,13 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
goto fail;
}
- rhashtable_insert_fast(profile->data, &data->head,
- profile->data->p);
+ if (rhashtable_insert_fast(profile->data, &data->head,
+ profile->data->p)) {
+ kfree_sensitive(data->key);
+ kfree_sensitive(data);
+ info = "failed to insert data to table";
+ goto fail;
+ }
}
if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
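
rhashtable_insert_fast() can fail (for example -ENOMEM under memory pressure), and
ignoring the return value leaked the just-built entry. The checked-insert shape,
sketched with hypothetical names (params is a struct rhashtable_params):

	if (rhashtable_insert_fast(table, &obj->head, params)) {
		kfree_sensitive(obj->key);	/* unwind the allocation */
		kfree_sensitive(obj);
		goto fail;
	}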
diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c
index e1bfdab524b7..5c9bde25e56d 100644
--- a/security/apparmor/policy_unpack_test.c
+++ b/security/apparmor/policy_unpack_test.c
@@ -69,31 +69,30 @@ static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
*buf = AA_NAME;
*(buf + 1) = strlen(TEST_STRING_NAME) + 1;
- strcpy(buf + 3, TEST_STRING_NAME);
+ strscpy(buf + 3, TEST_STRING_NAME, e->end - (void *)(buf + 3));
buf = e->start + TEST_STRING_BUF_OFFSET;
*buf = AA_STRING;
*(buf + 1) = strlen(TEST_STRING_DATA) + 1;
- strcpy(buf + 3, TEST_STRING_DATA);
-
+ strscpy(buf + 3, TEST_STRING_DATA, e->end - (void *)(buf + 3));
buf = e->start + TEST_NAMED_U32_BUF_OFFSET;
*buf = AA_NAME;
*(buf + 1) = strlen(TEST_U32_NAME) + 1;
- strcpy(buf + 3, TEST_U32_NAME);
+ strscpy(buf + 3, TEST_U32_NAME, e->end - (void *)(buf + 3));
*(buf + 3 + strlen(TEST_U32_NAME) + 1) = AA_U32;
*((u32 *)(buf + 3 + strlen(TEST_U32_NAME) + 2)) = TEST_U32_DATA;
buf = e->start + TEST_NAMED_U64_BUF_OFFSET;
*buf = AA_NAME;
*(buf + 1) = strlen(TEST_U64_NAME) + 1;
- strcpy(buf + 3, TEST_U64_NAME);
+ strscpy(buf + 3, TEST_U64_NAME, e->end - (void *)(buf + 3));
*(buf + 3 + strlen(TEST_U64_NAME) + 1) = AA_U64;
*((u64 *)(buf + 3 + strlen(TEST_U64_NAME) + 2)) = TEST_U64_DATA;
buf = e->start + TEST_NAMED_BLOB_BUF_OFFSET;
*buf = AA_NAME;
*(buf + 1) = strlen(TEST_BLOB_NAME) + 1;
- strcpy(buf + 3, TEST_BLOB_NAME);
+ strscpy(buf + 3, TEST_BLOB_NAME, e->end - (void *)(buf + 3));
*(buf + 3 + strlen(TEST_BLOB_NAME) + 1) = AA_BLOB;
*(buf + 3 + strlen(TEST_BLOB_NAME) + 2) = TEST_BLOB_DATA_SIZE;
memcpy(buf + 3 + strlen(TEST_BLOB_NAME) + 6,
@@ -102,7 +101,7 @@ static struct aa_ext *build_aa_ext_struct(struct policy_unpack_fixture *puf,
buf = e->start + TEST_NAMED_ARRAY_BUF_OFFSET;
*buf = AA_NAME;
*(buf + 1) = strlen(TEST_ARRAY_NAME) + 1;
- strcpy(buf + 3, TEST_ARRAY_NAME);
+ strscpy(buf + 3, TEST_ARRAY_NAME, e->end - (void *)(buf + 3));
*(buf + 3 + strlen(TEST_ARRAY_NAME) + 1) = AA_ARRAY;
*((u16 *)(buf + 3 + strlen(TEST_ARRAY_NAME) + 2)) = TEST_ARRAY_SIZE;
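
strscpy() bounds the copy to the destination size and always NUL-terminates,
returning the number of bytes copied or -E2BIG on truncation, where strcpy() would
silently overrun. A small standalone example:

	char dst[8];
	ssize_t n;

	n = strscpy(dst, "a string that is too long", sizeof(dst));
	if (n == -E2BIG)
		;	/* truncated, but dst is still NUL-terminated */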
diff --git a/security/apparmor/secid.c b/security/apparmor/secid.c
index 24a0e23f1b2b..83d3d1e6d9dc 100644
--- a/security/apparmor/secid.c
+++ b/security/apparmor/secid.c
@@ -53,8 +53,7 @@ void aa_secid_update(u32 secid, struct aa_label *label)
xa_unlock_irqrestore(&aa_secids, flags);
}
-/**
- *
+/*
* see label for inverse aa_label_to_secid
*/
struct aa_label *aa_secid_to_label(u32 secid)
diff --git a/security/apparmor/task.c b/security/apparmor/task.c
index 84d16a29bfcb..5671a716fcd2 100644
--- a/security/apparmor/task.c
+++ b/security/apparmor/task.c
@@ -93,9 +93,8 @@ int aa_replace_current_label(struct aa_label *label)
* aa_set_current_onexec - set the tasks change_profile to happen onexec
* @label: system label to set at exec (MAYBE NULL to clear value)
* @stack: whether stacking should be done
- * Returns: 0 or error on failure
*/
-int aa_set_current_onexec(struct aa_label *label, bool stack)
+void aa_set_current_onexec(struct aa_label *label, bool stack)
{
struct aa_task_ctx *ctx = task_ctx(current);
@@ -103,8 +102,6 @@ int aa_set_current_onexec(struct aa_label *label, bool stack)
aa_put_label(ctx->onexec);
ctx->onexec = label;
ctx->token = stack;
-
- return 0;
}
/**
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index 0aecf9334ec3..ab8c3093d5fd 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -5,26 +5,25 @@
obj-$(CONFIG_SECURITY_SELINUX) := selinux.o
+ccflags-y := -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include
+
selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \
netnode.o netport.o status.o \
ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \
ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/context.o
selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
-
selinux-$(CONFIG_NETLABEL) += netlabel.o
-
selinux-$(CONFIG_SECURITY_INFINIBAND) += ibpkey.o
-
selinux-$(CONFIG_IMA) += ima.o
-ccflags-y := -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include
+genhdrs := flask.h av_permissions.h
-$(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h
+$(addprefix $(obj)/,$(selinux-y)): $(addprefix $(obj)/,$(genhdrs))
-quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h
- cmd_flask = $< $(obj)/flask.h $(obj)/av_permissions.h
+quiet_cmd_genhdrs = GEN $(addprefix $(obj)/,$(genhdrs))
+ cmd_genhdrs = $< $(addprefix $(obj)/,$(genhdrs))
-targets += flask.h av_permissions.h
-$(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/genheaders/genheaders FORCE
- $(call if_changed,flask)
+targets += $(genhdrs)
+$(addprefix $(obj)/,$(genhdrs)) &: scripts/selinux/genheaders/genheaders FORCE
+ $(call if_changed,genhdrs)
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index eaed5c2da02b..1074db66e5ff 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -642,7 +642,6 @@ static void avc_insert(u32 ssid, u32 tsid, u16 tclass,
hlist_add_head_rcu(&node->list, head);
found:
spin_unlock_irqrestore(lock, flag);
- return;
}
/**
@@ -1203,22 +1202,3 @@ u32 avc_policy_seqno(void)
{
return selinux_avc.avc_cache.latest_notif;
}
-
-void avc_disable(void)
-{
- /*
- * If you are looking at this because you have realized that we are
- * not destroying the avc_node_cachep it might be easy to fix, but
- * I don't know the memory barrier semantics well enough to know. It's
- * possible that some other task dereferenced security_ops when
- * it still pointed to selinux operations. If that is the case it's
- * possible that it is about to use the avc and is about to need the
- * avc_node_cachep. I know I could wrap the security.c security_ops call
- * in an rcu_lock, but seriously, it's not worth it. Instead I just flush
- * the cache and get that memory back.
- */
- if (avc_node_cachep) {
- avc_flush();
- /* kmem_cache_destroy(avc_node_cachep); */
- }
-}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 79b4890e9936..99ded60a6b91 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -357,7 +357,7 @@ enum {
};
#define A(s, has_arg) {#s, sizeof(#s) - 1, Opt_##s, has_arg}
-static struct {
+static const struct {
const char *name;
int len;
int opt;
@@ -937,7 +937,7 @@ out:
}
/*
- * NOTE: the caller is resposible for freeing the memory even if on error.
+ * NOTE: the caller is responsible for freeing the memory, even on error.
*/
static int selinux_add_opt(int token, const char *s, void **mnt_opts)
{
diff --git a/security/selinux/ima.c b/security/selinux/ima.c
index 7daf59667f59..aa34da9b0aeb 100644
--- a/security/selinux/ima.c
+++ b/security/selinux/ima.c
@@ -4,7 +4,7 @@
*
* Author: Lakshmi Ramasubramanian (nramas@linux.microsoft.com)
*
- * Measure critical data structures maintainted by SELinux
+ * Measure critical data structures maintained by SELinux
* using IMA subsystem.
*/
#include <linux/vmalloc.h>
diff --git a/security/selinux/include/audit.h b/security/selinux/include/audit.h
index 406bceb90c6c..d5495134a5b9 100644
--- a/security/selinux/include/audit.h
+++ b/security/selinux/include/audit.h
@@ -41,7 +41,7 @@ void selinux_audit_rule_free(void *rule);
* selinux_audit_rule_match - determine if a context ID matches a rule.
* @sid: the context ID to check
* @field: the field this rule refers to
- * @op: the operater the rule uses
+ * @op: the operator the rule uses
* @rule: pointer to the audit rule to check against
*
* Returns 1 if the context id matches the rule, 0 if it does not, and
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 9301222c8e55..9e055f74daf6 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -168,9 +168,6 @@ int avc_get_hash_stats(char *page);
unsigned int avc_get_cache_threshold(void);
void avc_set_cache_threshold(unsigned int cache_threshold);
-/* Attempt to free avc node cache */
-void avc_disable(void);
-
#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
DECLARE_PER_CPU(struct avc_cache_stats, avc_cache_stats);
#endif
diff --git a/security/selinux/include/ima.h b/security/selinux/include/ima.h
index 05e04172c86d..93c05e97eb7f 100644
--- a/security/selinux/include/ima.h
+++ b/security/selinux/include/ima.h
@@ -4,7 +4,7 @@
*
* Author: Lakshmi Ramasubramanian (nramas@linux.microsoft.com)
*
- * Measure critical data structures maintainted by SELinux
+ * Measure critical data structures maintained by SELinux
* using IMA subsystem.
*/
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 69a583b91fc5..bad1f6b685fd 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -951,7 +951,7 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
* either whitespace or multibyte characters, they shall be
* encoded based on the percentage-encoding rule.
* If not encoded, the sscanf logic picks up only left-half
- * of the supplied name; splitted by a whitespace unexpectedly.
+ * of the supplied name; split by whitespace unexpectedly.
*/
char *r, *w;
int c1, c2;
@@ -1649,7 +1649,7 @@ static int sel_make_ss_files(struct dentry *dir)
struct super_block *sb = dir->d_sb;
struct selinux_fs_info *fsi = sb->s_fs_info;
int i;
- static struct tree_descr files[] = {
+ static const struct tree_descr files[] = {
{ "sidtab_hash_stats", &sel_sidtab_hash_stats_ops, S_IRUGO },
};
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index 8480ec6c6e75..6766edc0fe68 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -354,7 +354,7 @@ int avtab_alloc_dup(struct avtab *new, const struct avtab *orig)
return avtab_alloc_common(new, orig->nslot);
}
-void avtab_hash_eval(struct avtab *h, char *tag)
+void avtab_hash_eval(struct avtab *h, const char *tag)
{
int i, chain_len, slots_used, max_chain_len;
unsigned long long chain2_len_sum;
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index d3ebea8d146f..d6742fd9c560 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -92,7 +92,7 @@ int avtab_alloc(struct avtab *, u32);
int avtab_alloc_dup(struct avtab *new, const struct avtab *orig);
struct avtab_datum *avtab_search(struct avtab *h, const struct avtab_key *k);
void avtab_destroy(struct avtab *h);
-void avtab_hash_eval(struct avtab *h, char *tag);
+void avtab_hash_eval(struct avtab *h, const char *tag);
struct policydb;
int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h
index eda32c3d4c0a..44179977f434 100644
--- a/security/selinux/ss/context.h
+++ b/security/selinux/ss/context.h
@@ -167,6 +167,7 @@ static inline int context_cpy(struct context *dst, const struct context *src)
rc = mls_context_cpy(dst, src);
if (rc) {
kfree(dst->str);
+ dst->str = NULL;
return rc;
}
return 0;
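
Clearing dst->str after kfree() matters because the half-copied context is
typically handed to context_destroy() on the error path, which would otherwise free
the dangling pointer a second time. The general pattern:

	kfree(dst->str);
	dst->str = NULL;	/* later destroy paths see a safe NULL */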
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index adcfb63b3550..97c0074f9312 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -42,7 +42,7 @@
#include "services.h"
#ifdef DEBUG_HASHES
-static const char *symtab_name[SYM_NUM] = {
+static const char *const symtab_name[SYM_NUM] = {
"common prefixes",
"classes",
"roles",
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index f14d1ffe54c5..78946b71c1c1 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -583,7 +583,7 @@ static void type_attribute_bounds_av(struct policydb *policydb,
/*
* flag which drivers have permissions
- * only looking for ioctl based extended permssions
+ * only looking for ioctl based extended permissions
*/
void services_compute_xperms_drivers(
struct extended_perms *xperms,
@@ -3541,38 +3541,38 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
tmprule = kzalloc(sizeof(struct selinux_audit_rule), GFP_KERNEL);
if (!tmprule)
return -ENOMEM;
-
context_init(&tmprule->au_ctxt);
rcu_read_lock();
policy = rcu_dereference(state->policy);
policydb = &policy->policydb;
-
tmprule->au_seqno = policy->latest_granting;
-
switch (field) {
case AUDIT_SUBJ_USER:
case AUDIT_OBJ_USER:
- rc = -EINVAL;
userdatum = symtab_search(&policydb->p_users, rulestr);
- if (!userdatum)
- goto out;
+ if (!userdatum) {
+ rc = -EINVAL;
+ goto err;
+ }
tmprule->au_ctxt.user = userdatum->value;
break;
case AUDIT_SUBJ_ROLE:
case AUDIT_OBJ_ROLE:
- rc = -EINVAL;
roledatum = symtab_search(&policydb->p_roles, rulestr);
- if (!roledatum)
- goto out;
+ if (!roledatum) {
+ rc = -EINVAL;
+ goto err;
+ }
tmprule->au_ctxt.role = roledatum->value;
break;
case AUDIT_SUBJ_TYPE:
case AUDIT_OBJ_TYPE:
- rc = -EINVAL;
typedatum = symtab_search(&policydb->p_types, rulestr);
- if (!typedatum)
- goto out;
+ if (!typedatum) {
+ rc = -EINVAL;
+ goto err;
+ }
tmprule->au_ctxt.type = typedatum->value;
break;
case AUDIT_SUBJ_SEN:
@@ -3582,20 +3582,18 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
rc = mls_from_string(policydb, rulestr, &tmprule->au_ctxt,
GFP_ATOMIC);
if (rc)
- goto out;
+ goto err;
break;
}
- rc = 0;
-out:
rcu_read_unlock();
- if (rc) {
- selinux_audit_rule_free(tmprule);
- tmprule = NULL;
- }
-
*rule = tmprule;
+ return 0;
+err:
+ rcu_read_unlock();
+ selinux_audit_rule_free(tmprule);
+ *rule = NULL;
return rc;
}
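
The selinux_audit_rule_init() restructuring replaces the shared "out" label with a
dedicated error label so rcu_read_unlock() runs exactly once on every exit. The
control-flow shape, sketched generically (lookup() and free_rule() are
placeholders):

	rcu_read_lock();
	datum = lookup(rulestr);
	if (!datum) {
		rc = -EINVAL;
		goto err;
	}
	rcu_read_unlock();
	*rule = tmprule;
	return 0;
err:
	rcu_read_unlock();	/* the single unlock on failure */
	free_rule(tmprule);
	*rule = NULL;
	return rc;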
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index 192208c291d6..068cb6624e36 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -1310,7 +1310,7 @@ static const struct snd_emu_chip_details emu_chip_details[] = {
/* Does NOT support sync daughter card (obviously). */
/* Tested by James@superbug.co.uk 4th Nov 2007. */
{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x42011102,
- .driver = "Audigy2", .name = "E-mu 1010 Notebook [MAEM8950]",
+ .driver = "Audigy2", .name = "E-MU 02 CardBus [MAEM8950]",
.id = "EMU1010",
.emu10k2_chip = 1,
.ca0108_chip = 1,
@@ -1323,7 +1323,7 @@ static const struct snd_emu_chip_details emu_chip_details[] = {
* MicroDock[M] to make it an E-MU 1616[m]. */
/* Does NOT support sync daughter card. */
{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40041102,
- .driver = "Audigy2", .name = "E-mu 1010b PCI [MAEM8960]",
+ .driver = "Audigy2", .name = "E-MU 1010b PCI [MAEM8960]",
.id = "EMU1010",
.emu10k2_chip = 1,
.ca0108_chip = 1,
@@ -1337,7 +1337,7 @@ static const struct snd_emu_chip_details emu_chip_details[] = {
* still work. */
/* Does NOT support sync daughter card. */
{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40071102,
- .driver = "Audigy2", .name = "E-mu 1010 PCIe [MAEM8986]",
+ .driver = "Audigy2", .name = "E-MU 1010 PCIe [MAEM8986]",
.id = "EMU1010",
.emu10k2_chip = 1,
.ca0108_chip = 1,
@@ -1349,7 +1349,7 @@ static const struct snd_emu_chip_details emu_chip_details[] = {
* AudioDock[M] to make it an E-MU 1820[m]. */
/* Supports sync daughter card. */
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x40011102,
- .driver = "Audigy2", .name = "E-mu 1010 [MAEM8810]",
+ .driver = "Audigy2", .name = "E-MU 1010 [MAEM8810]",
.id = "EMU1010",
.emu10k2_chip = 1,
.ca0102_chip = 1,
@@ -1359,7 +1359,7 @@ static const struct snd_emu_chip_details emu_chip_details[] = {
/* Supports sync daughter card. */
/* Tested by oswald.buddenhagen@gmx.de Mar 2023. */
{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40021102,
- .driver = "Audigy2", .name = "E-mu 0404b PCI [MAEM8852]",
+ .driver = "Audigy2", .name = "E-MU 0404b PCI [MAEM8852]",
.id = "EMU0404",
.emu10k2_chip = 1,
.ca0108_chip = 1,
@@ -1369,7 +1369,7 @@ static const struct snd_emu_chip_details emu_chip_details[] = {
/* Supports sync daughter card. */
/* Tested by James@superbug.co.uk 20-3-2007. */
{.vendor = 0x1102, .device = 0x0004, .subsystem = 0x40021102,
- .driver = "Audigy2", .name = "E-mu 0404 [MAEM8850]",
+ .driver = "Audigy2", .name = "E-MU 0404 [MAEM8850]",
.id = "EMU0404",
.emu10k2_chip = 1,
.ca0102_chip = 1,
@@ -1378,7 +1378,7 @@ static const struct snd_emu_chip_details emu_chip_details[] = {
/* EMU0404 PCIe */
/* Does NOT support sync daughter card. */
{.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40051102,
- .driver = "Audigy2", .name = "E-mu 0404 PCIe [MAEM8984]",
+ .driver = "Audigy2", .name = "E-MU 0404 PCIe [MAEM8984]",
.id = "EMU0404",
.emu10k2_chip = 1,
.ca0108_chip = 1,
@@ -1645,7 +1645,7 @@ static const struct snd_emu_chip_details emu_chip_details[] = {
.ac97_chip = 1,
.sblive51 = 1} ,
{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x40011102,
- .driver = "EMU10K1", .name = "E-mu APS [PC545]",
+ .driver = "EMU10K1", .name = "E-MU APS [PC545]",
.id = "APS",
.emu10k1_chip = 1,
.ecard = 1} ,
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 3f64ccab0e63..98785110ef63 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -318,16 +318,12 @@ static int snd_emu10k1_gpr_ctl_info(struct snd_kcontrol *kcontrol, struct snd_ct
static int snd_emu10k1_gpr_ctl_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
- struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
struct snd_emu10k1_fx8010_ctl *ctl =
(struct snd_emu10k1_fx8010_ctl *) kcontrol->private_value;
- unsigned long flags;
unsigned int i;
- spin_lock_irqsave(&emu->reg_lock, flags);
for (i = 0; i < ctl->vcount; i++)
ucontrol->value.integer.value[i] = ctl->value[i];
- spin_unlock_irqrestore(&emu->reg_lock, flags);
return 0;
}
@@ -336,12 +332,10 @@ static int snd_emu10k1_gpr_ctl_put(struct snd_kcontrol *kcontrol, struct snd_ctl
struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
struct snd_emu10k1_fx8010_ctl *ctl =
(struct snd_emu10k1_fx8010_ctl *) kcontrol->private_value;
- unsigned long flags;
unsigned int nval, val;
unsigned int i, j;
int change = 0;
- spin_lock_irqsave(&emu->reg_lock, flags);
for (i = 0; i < ctl->vcount; i++) {
nval = ucontrol->value.integer.value[i];
if (nval < ctl->min)
@@ -380,7 +374,6 @@ static int snd_emu10k1_gpr_ctl_put(struct snd_kcontrol *kcontrol, struct snd_ctl
}
}
__error:
- spin_unlock_irqrestore(&emu->reg_lock, flags);
return change;
}
diff --git a/sound/pci/emu10k1/emumixer.c b/sound/pci/emu10k1/emumixer.c
index 3ebc7c36a444..610700be1e37 100644
--- a/sound/pci/emu10k1/emumixer.c
+++ b/sound/pci/emu10k1/emumixer.c
@@ -41,17 +41,14 @@ static int snd_emu10k1_spdif_get(struct snd_kcontrol *kcontrol,
{
struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
- unsigned long flags;
/* Limit: emu->spdif_bits */
if (idx >= 3)
return -EINVAL;
- spin_lock_irqsave(&emu->reg_lock, flags);
ucontrol->value.iec958.status[0] = (emu->spdif_bits[idx] >> 0) & 0xff;
ucontrol->value.iec958.status[1] = (emu->spdif_bits[idx] >> 8) & 0xff;
ucontrol->value.iec958.status[2] = (emu->spdif_bits[idx] >> 16) & 0xff;
ucontrol->value.iec958.status[3] = (emu->spdif_bits[idx] >> 24) & 0xff;
- spin_unlock_irqrestore(&emu->reg_lock, flags);
return 0;
}
@@ -81,16 +78,16 @@ static const char * const emu1010_src_texts[] = {
"Dock ADC3 Right",
"0202 ADC Left",
"0202 ADC Right",
- "0202 SPDIF Left",
- "0202 SPDIF Right",
- "ADAT 0",
- "ADAT 1",
- "ADAT 2",
- "ADAT 3",
- "ADAT 4",
- "ADAT 5",
- "ADAT 6",
- "ADAT 7",
+ "1010 SPDIF Left",
+ "1010 SPDIF Right",
+ "1010 ADAT 0",
+ "1010 ADAT 1",
+ "1010 ADAT 2",
+ "1010 ADAT 3",
+ "1010 ADAT 4",
+ "1010 ADAT 5",
+ "1010 ADAT 6",
+ "1010 ADAT 7",
"DSP 0",
"DSP 1",
"DSP 2",
@@ -129,14 +126,14 @@ static const char * const emu1010_src_texts[] = {
static const char * const emu1616_src_texts[] = {
"Silence",
- "Dock Mic A",
- "Dock Mic B",
- "Dock ADC1 Left",
- "Dock ADC1 Right",
- "Dock ADC2 Left",
- "Dock ADC2 Right",
- "Dock SPDIF Left",
- "Dock SPDIF Right",
+ "Mic A",
+ "Mic B",
+ "ADC1 Left",
+ "ADC1 Right",
+ "ADC2 Left",
+ "ADC2 Right",
+ "SPDIF Left",
+ "SPDIF Right",
"ADAT 0",
"ADAT 1",
"ADAT 2",
@@ -1070,10 +1067,7 @@ static int snd_audigy_spdif_output_rate_get(struct snd_kcontrol *kcontrol,
{
struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
unsigned int tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&emu->reg_lock, flags);
tmp = snd_emu10k1_ptr_read(emu, A_SPDIF_SAMPLERATE, 0);
switch (tmp & A_SPDIF_RATE_MASK) {
case A_SPDIF_44100:
@@ -1088,7 +1082,6 @@ static int snd_audigy_spdif_output_rate_get(struct snd_kcontrol *kcontrol,
default:
ucontrol->value.enumerated.item[0] = 1;
}
- spin_unlock_irqrestore(&emu->reg_lock, flags);
return 0;
}
@@ -1146,7 +1139,6 @@ static int snd_emu10k1_spdif_put(struct snd_kcontrol *kcontrol,
unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
int change;
unsigned int val;
- unsigned long flags;
/* Limit: emu->spdif_bits */
if (idx >= 3)
@@ -1155,13 +1147,11 @@ static int snd_emu10k1_spdif_put(struct snd_kcontrol *kcontrol,
(ucontrol->value.iec958.status[1] << 8) |
(ucontrol->value.iec958.status[2] << 16) |
(ucontrol->value.iec958.status[3] << 24);
- spin_lock_irqsave(&emu->reg_lock, flags);
change = val != emu->spdif_bits[idx];
if (change) {
snd_emu10k1_ptr_write(emu, SPCS0 + idx, 0, val);
emu->spdif_bits[idx] = val;
}
- spin_unlock_irqrestore(&emu->reg_lock, flags);
return change;
}
@@ -1229,7 +1219,6 @@ static int snd_emu10k1_send_routing_info(struct snd_kcontrol *kcontrol, struct s
static int snd_emu10k1_send_routing_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- unsigned long flags;
struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
struct snd_emu10k1_pcm_mixer *mix =
&emu->pcm_mixer[snd_ctl_get_ioffidx(kcontrol, &ucontrol->id)];
@@ -1237,12 +1226,10 @@ static int snd_emu10k1_send_routing_get(struct snd_kcontrol *kcontrol,
int num_efx = emu->audigy ? 8 : 4;
int mask = emu->audigy ? 0x3f : 0x0f;
- spin_lock_irqsave(&emu->reg_lock, flags);
for (voice = 0; voice < 3; voice++)
for (idx = 0; idx < num_efx; idx++)
ucontrol->value.integer.value[(voice * num_efx) + idx] =
mix->send_routing[voice][idx] & mask;
- spin_unlock_irqrestore(&emu->reg_lock, flags);
return 0;
}
@@ -1305,17 +1292,14 @@ static int snd_emu10k1_send_volume_info(struct snd_kcontrol *kcontrol, struct sn
static int snd_emu10k1_send_volume_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- unsigned long flags;
struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
struct snd_emu10k1_pcm_mixer *mix =
&emu->pcm_mixer[snd_ctl_get_ioffidx(kcontrol, &ucontrol->id)];
int idx;
int num_efx = emu->audigy ? 8 : 4;
- spin_lock_irqsave(&emu->reg_lock, flags);
for (idx = 0; idx < 3*num_efx; idx++)
ucontrol->value.integer.value[idx] = mix->send_volume[idx/num_efx][idx%num_efx];
- spin_unlock_irqrestore(&emu->reg_lock, flags);
return 0;
}
@@ -1378,13 +1362,10 @@ static int snd_emu10k1_attn_get(struct snd_kcontrol *kcontrol,
struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
struct snd_emu10k1_pcm_mixer *mix =
&emu->pcm_mixer[snd_ctl_get_ioffidx(kcontrol, &ucontrol->id)];
- unsigned long flags;
int idx;
- spin_lock_irqsave(&emu->reg_lock, flags);
for (idx = 0; idx < 3; idx++)
ucontrol->value.integer.value[idx] = mix->attn[idx];
- spin_unlock_irqrestore(&emu->reg_lock, flags);
return 0;
}
@@ -1443,7 +1424,6 @@ static int snd_emu10k1_efx_send_routing_info(struct snd_kcontrol *kcontrol, stru
static int snd_emu10k1_efx_send_routing_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- unsigned long flags;
struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
struct snd_emu10k1_pcm_mixer *mix =
&emu->efx_pcm_mixer[snd_ctl_get_ioffidx(kcontrol, &ucontrol->id)];
@@ -1451,11 +1431,9 @@ static int snd_emu10k1_efx_send_routing_get(struct snd_kcontrol *kcontrol,
int num_efx = emu->audigy ? 8 : 4;
int mask = emu->audigy ? 0x3f : 0x0f;
- spin_lock_irqsave(&emu->reg_lock, flags);
for (idx = 0; idx < num_efx; idx++)
ucontrol->value.integer.value[idx] =
mix->send_routing[0][idx] & mask;
- spin_unlock_irqrestore(&emu->reg_lock, flags);
return 0;
}
@@ -1513,17 +1491,14 @@ static int snd_emu10k1_efx_send_volume_info(struct snd_kcontrol *kcontrol, struc
static int snd_emu10k1_efx_send_volume_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- unsigned long flags;
struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
struct snd_emu10k1_pcm_mixer *mix =
&emu->efx_pcm_mixer[snd_ctl_get_ioffidx(kcontrol, &ucontrol->id)];
int idx;
int num_efx = emu->audigy ? 8 : 4;
- spin_lock_irqsave(&emu->reg_lock, flags);
for (idx = 0; idx < num_efx; idx++)
ucontrol->value.integer.value[idx] = mix->send_volume[0][idx];
- spin_unlock_irqrestore(&emu->reg_lock, flags);
return 0;
}
@@ -1582,11 +1557,8 @@ static int snd_emu10k1_efx_attn_get(struct snd_kcontrol *kcontrol,
struct snd_emu10k1 *emu = snd_kcontrol_chip(kcontrol);
struct snd_emu10k1_pcm_mixer *mix =
&emu->efx_pcm_mixer[snd_ctl_get_ioffidx(kcontrol, &ucontrol->id)];
- unsigned long flags;
- spin_lock_irqsave(&emu->reg_lock, flags);
ucontrol->value.integer.value[0] = mix->attn[0];
- spin_unlock_irqrestore(&emu->reg_lock, flags);
return 0;
}
@@ -1654,7 +1626,7 @@ static int snd_emu10k1_shared_spdif_put(struct snd_kcontrol *kcontrol,
sw = ucontrol->value.integer.value[0];
if (emu->card_capabilities->invert_shared_spdif)
sw = !sw;
- spin_lock_irqsave(&emu->reg_lock, flags);
+ spin_lock_irqsave(&emu->emu_lock, flags);
if ( emu->card_capabilities->i2c_adc) {
/* Do nothing for Audigy 2 ZS Notebook */
} else if (emu->audigy) {
@@ -1675,7 +1647,7 @@ static int snd_emu10k1_shared_spdif_put(struct snd_kcontrol *kcontrol,
reg |= val;
outl(reg | val, emu->port + HCFG);
}
- spin_unlock_irqrestore(&emu->reg_lock, flags);
+ spin_unlock_irqrestore(&emu->emu_lock, flags);
return change;
}
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index e8d2f0f6fbb3..5ed404e8ed39 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -1433,10 +1433,8 @@ static int snd_emu10k1_pcm_efx_voices_mask_get(struct snd_kcontrol *kcontrol, st
int nefx = emu->audigy ? 64 : 32;
int idx;
- spin_lock_irq(&emu->reg_lock);
for (idx = 0; idx < nefx; idx++)
ucontrol->value.integer.value[idx] = (emu->efx_voices_mask[idx / 32] & (1 << (idx % 32))) ? 1 : 0;
- spin_unlock_irq(&emu->reg_lock);
return 0;
}
diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
index bec72dc60a41..c92253de881e 100644
--- a/sound/pci/emu10k1/emuproc.c
+++ b/sound/pci/emu10k1/emuproc.c
@@ -399,13 +399,10 @@ static void snd_emu_proc_io_reg_read(struct snd_info_entry *entry,
{
struct snd_emu10k1 *emu = entry->private_data;
unsigned long value;
- unsigned long flags;
int i;
snd_iprintf(buffer, "IO Registers:\n\n");
for(i = 0; i < 0x40; i+=4) {
- spin_lock_irqsave(&emu->emu_lock, flags);
value = inl(emu->port + i);
- spin_unlock_irqrestore(&emu->emu_lock, flags);
snd_iprintf(buffer, "%02X: %08lX\n", i, value);
}
}
@@ -414,16 +411,13 @@ static void snd_emu_proc_io_reg_write(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_emu10k1 *emu = entry->private_data;
- unsigned long flags;
char line[64];
u32 reg, val;
while (!snd_info_get_line(buffer, line, sizeof(line))) {
if (sscanf(line, "%x %x", &reg, &val) != 2)
continue;
if (reg < 0x40 && val <= 0xffffffff) {
- spin_lock_irqsave(&emu->emu_lock, flags);
outl(val, emu->port + (reg & 0xfffffffc));
- spin_unlock_irqrestore(&emu->emu_lock, flags);
}
}
}
diff --git a/sound/pci/emu10k1/io.c b/sound/pci/emu10k1/io.c
index cfb96a67aa35..aee84c3f9f37 100644
--- a/sound/pci/emu10k1/io.c
+++ b/sound/pci/emu10k1/io.c
@@ -233,16 +233,13 @@ int snd_emu10k1_i2c_write(struct snd_emu10k1 *emu,
return err;
}
-void snd_emu1010_fpga_write(struct snd_emu10k1 *emu, u32 reg, u32 value)
+static void snd_emu1010_fpga_write_locked(struct snd_emu10k1 *emu, u32 reg, u32 value)
{
- unsigned long flags;
-
if (snd_BUG_ON(reg > 0x3f))
return;
reg += 0x40; /* 0x40 upwards are registers. */
if (snd_BUG_ON(value > 0x3f)) /* 0 to 0x3f are values */
return;
- spin_lock_irqsave(&emu->emu_lock, flags);
outw(reg, emu->port + A_GPIO);
udelay(10);
outw(reg | 0x80, emu->port + A_GPIO); /* High bit clocks the value into the fpga. */
@@ -250,6 +247,14 @@ void snd_emu1010_fpga_write(struct snd_emu10k1 *emu, u32 reg, u32 value)
outw(value, emu->port + A_GPIO);
udelay(10);
outw(value | 0x80 , emu->port + A_GPIO); /* High bit clocks the value into the fpga. */
+}
+
+void snd_emu1010_fpga_write(struct snd_emu10k1 *emu, u32 reg, u32 value)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_write_locked(emu, reg, value);
spin_unlock_irqrestore(&emu->emu_lock, flags);
}
@@ -276,14 +281,18 @@ void snd_emu1010_fpga_read(struct snd_emu10k1 *emu, u32 reg, u32 *value)
*/
void snd_emu1010_fpga_link_dst_src_write(struct snd_emu10k1 *emu, u32 dst, u32 src)
{
+ unsigned long flags;
+
if (snd_BUG_ON(dst & ~0x71f))
return;
if (snd_BUG_ON(src & ~0x71f))
return;
- snd_emu1010_fpga_write(emu, EMU_HANA_DESTHI, dst >> 8);
- snd_emu1010_fpga_write(emu, EMU_HANA_DESTLO, dst & 0x1f);
- snd_emu1010_fpga_write(emu, EMU_HANA_SRCHI, src >> 8);
- snd_emu1010_fpga_write(emu, EMU_HANA_SRCLO, src & 0x1f);
+ spin_lock_irqsave(&emu->emu_lock, flags);
+ snd_emu1010_fpga_write_locked(emu, EMU_HANA_DESTHI, dst >> 8);
+ snd_emu1010_fpga_write_locked(emu, EMU_HANA_DESTLO, dst & 0x1f);
+ snd_emu1010_fpga_write_locked(emu, EMU_HANA_SRCHI, src >> 8);
+ snd_emu1010_fpga_write_locked(emu, EMU_HANA_SRCLO, src & 0x1f);
+ spin_unlock_irqrestore(&emu->emu_lock, flags);
}
void snd_emu10k1_intr_enable(struct snd_emu10k1 *emu, unsigned int intrenb)
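
snd_emu1010_fpga_link_dst_src_write() needs its four FPGA register writes to happen
atomically with respect to other emu_lock users, hence the split into a public
locking wrapper and a *_locked() body. The pattern, with placeholder names:

	static void hw_write_locked(struct chip *c, u32 reg, u32 val)
	{
		/* caller holds c->lock */
		outw(reg, c->port);
		outw(val, c->port);
	}

	void hw_write(struct chip *c, u32 reg, u32 val)
	{
		unsigned long flags;

		spin_lock_irqsave(&c->lock, flags);
		hw_write_locked(c, reg, val);
		spin_unlock_irqrestore(&c->lock, flags);
	}

	/* a caller grouping several writes takes the lock once: */
	spin_lock_irqsave(&c->lock, flags);
	hw_write_locked(c, REG_A, dst >> 8);
	hw_write_locked(c, REG_B, dst & 0x1f);
	spin_unlock_irqrestore(&c->lock, flags);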
diff --git a/sound/pci/emu10k1/timer.c b/sound/pci/emu10k1/timer.c
index 2435d3ba68f7..993663fef885 100644
--- a/sound/pci/emu10k1/timer.c
+++ b/sound/pci/emu10k1/timer.c
@@ -18,29 +18,23 @@
static int snd_emu10k1_timer_start(struct snd_timer *timer)
{
struct snd_emu10k1 *emu;
- unsigned long flags;
unsigned int delay;
emu = snd_timer_chip(timer);
delay = timer->sticks - 1;
if (delay < 5 ) /* minimum time is 5 ticks */
delay = 5;
- spin_lock_irqsave(&emu->reg_lock, flags);
snd_emu10k1_intr_enable(emu, INTE_INTERVALTIMERENB);
outw(delay & TIMER_RATE_MASK, emu->port + TIMER);
- spin_unlock_irqrestore(&emu->reg_lock, flags);
return 0;
}
static int snd_emu10k1_timer_stop(struct snd_timer *timer)
{
struct snd_emu10k1 *emu;
- unsigned long flags;
emu = snd_timer_chip(timer);
- spin_lock_irqsave(&emu->reg_lock, flags);
snd_emu10k1_intr_disable(emu, INTE_INTERVALTIMERENB);
- spin_unlock_irqrestore(&emu->reg_lock, flags);
return 0;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 172ffc2c332b..bc062c0a1916 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -9522,6 +9522,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
@@ -9618,6 +9619,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x7716, "Clevo NS50PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x7717, "Clevo NS70PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x7718, "Clevo L140PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x7724, "Clevo L140AU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -11663,6 +11665,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
SND_PCI_QUIRK(0x103c, 0x870c, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
+ SND_PCI_QUIRK(0x103c, 0x872b, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2),
SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2),
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index 0bc6e4066d0f..4406a5def076 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -311,6 +311,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "8A22"),
}
},
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "System76"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "pang12"),
+ }
+ },
{}
};
diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c
index acdf98b2ee9c..ca20cade6840 100644
--- a/sound/soc/dwc/dwc-i2s.c
+++ b/sound/soc/dwc/dwc-i2s.c
@@ -132,13 +132,13 @@ static irqreturn_t i2s_irq_handler(int irq, void *dev_id)
/* Error Handling: TX */
if (isr[i] & ISR_TXFO) {
- dev_err(dev->dev, "TX overrun (ch_id=%d)\n", i);
+ dev_err_ratelimited(dev->dev, "TX overrun (ch_id=%d)\n", i);
irq_valid = true;
}
/* Error Handling: TX */
if (isr[i] & ISR_RXFO) {
- dev_err(dev->dev, "RX overrun (ch_id=%d)\n", i);
+ dev_err_ratelimited(dev->dev, "RX overrun (ch_id=%d)\n", i);
irq_valid = true;
}
}
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index d98dbc50cf4c..243b74e18e51 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -212,7 +212,10 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
case BPF_LINK_TYPE_NETFILTER:
netfilter_dump_json(info, json_wtr);
break;
-
+ case BPF_LINK_TYPE_STRUCT_OPS:
+ jsonw_uint_field(json_wtr, "map_id",
+ info->struct_ops.map_id);
+ break;
default:
break;
}
@@ -245,7 +248,10 @@ static void show_link_header_plain(struct bpf_link_info *info)
else
printf("type %u ", info->type);
- printf("prog %u ", info->prog_id);
+ if (info->type == BPF_LINK_TYPE_STRUCT_OPS)
+ printf("map %u ", info->struct_ops.map_id);
+ else
+ printf("prog %u ", info->prog_id);
}
static void show_link_attach_type_plain(__u32 attach_type)
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index aaeb8939e137..ae9e822aa3fe 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -139,6 +139,9 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
print_hex_data_json(key, info->key_size);
jsonw_name(json_wtr, "value");
print_hex_data_json(value, info->value_size);
+ if (map_is_map_of_maps(info->type))
+ jsonw_uint_field(json_wtr, "inner_map_id",
+ *(unsigned int *)value);
if (btf) {
struct btf_dumper d = {
.btf = btf,
@@ -259,8 +262,13 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
}
if (info->value_size) {
- printf("value:%c", break_names ? '\n' : ' ');
- fprint_hex(stdout, value, info->value_size, " ");
+ if (map_is_map_of_maps(info->type)) {
+ printf("inner_map_id:%c", break_names ? '\n' : ' ');
+ printf("%u ", *(unsigned int *)value);
+ } else {
+ printf("value:%c", break_names ? '\n' : ' ');
+ fprint_hex(stdout, value, info->value_size, " ");
+ }
}
printf("\n");
diff --git a/tools/counter/.gitignore b/tools/counter/.gitignore
new file mode 100644
index 000000000000..9fd290d4bf43
--- /dev/null
+++ b/tools/counter/.gitignore
@@ -0,0 +1,2 @@
+/counter_example
+/include/linux/counter.h
diff --git a/tools/counter/Makefile b/tools/counter/Makefile
index 8843f0fa6119..a0f4cab71fe5 100644
--- a/tools/counter/Makefile
+++ b/tools/counter/Makefile
@@ -40,6 +40,7 @@ $(OUTPUT)counter_example: $(COUNTER_EXAMPLE)
clean:
rm -f $(ALL_PROGRAMS)
rm -rf $(OUTPUT)include/linux/counter.h
+ rmdir -p $(OUTPUT)include/linux
find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
install: $(ALL_PROGRAMS)
diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h
index 04739a6293c4..05a228a6ee78 100644
--- a/tools/include/nolibc/nolibc.h
+++ b/tools/include/nolibc/nolibc.h
@@ -99,11 +99,11 @@
#include "sys.h"
#include "ctype.h"
#include "signal.h"
+#include "unistd.h"
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
#include "time.h"
-#include "unistd.h"
#include "stackprotector.h"
/* Used by programs to avoid std includes */
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index 6fb3d0f9af17..be076a4041ab 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -351,6 +351,7 @@ struct pt_regs___arm64 {
* https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions
*/
+/* riscv provides struct user_regs_struct instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
#define __PT_PARM1_REG a0
#define __PT_PARM2_REG a1
@@ -383,7 +384,7 @@ struct pt_regs___arm64 {
* https://raw.githubusercontent.com/wiki/foss-for-synopsys-dwc-arc-processors/toolchain/files/ARCv2_ABI.pdf
*/
-/* arc provides struct user_pt_regs instead of struct pt_regs to userspace */
+/* arc provides struct user_regs_struct instead of struct pt_regs to userspace */
#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
#define __PT_PARM1_REG scratch.r0
#define __PT_PARM2_REG scratch.r1
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 580985ee5545..4d9f30bf7f01 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -2250,9 +2250,25 @@ static int btf_dump_type_data_check_overflow(struct btf_dump *d,
const struct btf_type *t,
__u32 id,
const void *data,
- __u8 bits_offset)
+ __u8 bits_offset,
+ __u8 bit_sz)
{
- __s64 size = btf__resolve_size(d->btf, id);
+ __s64 size;
+
+ if (bit_sz) {
+ /* bits_offset is at most 7. bit_sz is at most 128. */
+ __u8 nr_bytes = (bits_offset + bit_sz + 7) / 8;
+
+ /* When bit_sz is non-zero, this is called from
+ * btf_dump_struct_data(), which only cares about
+ * a negative error value.
+ * Return nr_bytes on success to stay consistent
+ * with the regular integer case below.
+ */
+ return data + nr_bytes > d->typed_dump->data_end ? -E2BIG : nr_bytes;
+ }
+
+ size = btf__resolve_size(d->btf, id);
if (size < 0 || size >= INT_MAX) {
pr_warn("unexpected size [%zu] for id [%u]\n",
@@ -2407,7 +2423,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d,
{
int size, err = 0;
- size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset);
+ size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset, bit_sz);
if (size < 0)
return size;
err = btf_dump_type_data_check_zero(d, t, id, data, bits_offset, bit_sz);
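
A worked instance of the bitfield rounding above: the overflow check is
byte-granular, so the bit span is rounded up to whole bytes first.

	__u8 bits_offset = 7, bit_sz = 10;
	__u8 nr_bytes = (bits_offset + bit_sz + 7) / 8;	/* (7+10+7)/8 = 3 */

	/* the check then reduces to a plain byte-range test */
	if (data + nr_bytes > d->typed_dump->data_end)
		return -E2BIG;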
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 777a0d8ba7d1..ff9a52e44688 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -130,7 +130,7 @@ OPTIONS
-F::
--fields::
Comma separated list of fields to print. Options are:
- comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
+ comm, tid, pid, time, cpu, event, trace, ip, sym, dso, dsoff, addr, symoff,
srcline, period, iregs, uregs, brstack, brstacksym, flags, bpf-output,
brstackinsn, brstackinsnlen, brstackoff, callindent, insn, insnlen, synth,
phys_addr, metric, misc, srccode, ipc, data_page_size, code_page_size, ins_lat,
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index b951374bc49d..4908d54dd33b 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -43,6 +43,20 @@ const char *const powerpc_triplets[] = {
NULL
};
+const char *const riscv32_triplets[] = {
+ "riscv32-unknown-linux-gnu-",
+ "riscv32-linux-android-",
+ "riscv32-linux-gnu-",
+ NULL
+};
+
+const char *const riscv64_triplets[] = {
+ "riscv64-unknown-linux-gnu-",
+ "riscv64-linux-android-",
+ "riscv64-linux-gnu-",
+ NULL
+};
+
const char *const s390_triplets[] = {
"s390-ibm-linux-",
"s390x-linux-gnu-",
@@ -164,6 +178,10 @@ static int perf_env__lookup_binutils_path(struct perf_env *env,
path_list = arm64_triplets;
else if (!strcmp(arch, "powerpc"))
path_list = powerpc_triplets;
+ else if (!strcmp(arch, "riscv32"))
+ path_list = riscv32_triplets;
+ else if (!strcmp(arch, "riscv64"))
+ path_list = riscv64_triplets;
else if (!strcmp(arch, "sh"))
path_list = sh_triplets;
else if (!strcmp(arch, "s390"))
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 006f522d0e7f..c7bf1ac14e90 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -133,6 +133,7 @@ enum perf_output_field {
PERF_OUTPUT_VCPU = 1ULL << 38,
PERF_OUTPUT_CGROUP = 1ULL << 39,
PERF_OUTPUT_RETIRE_LAT = 1ULL << 40,
+ PERF_OUTPUT_DSOFF = 1ULL << 41,
};
struct perf_script {
@@ -174,6 +175,7 @@ struct output_option {
{.str = "ip", .field = PERF_OUTPUT_IP},
{.str = "sym", .field = PERF_OUTPUT_SYM},
{.str = "dso", .field = PERF_OUTPUT_DSO},
+ {.str = "dsoff", .field = PERF_OUTPUT_DSOFF},
{.str = "addr", .field = PERF_OUTPUT_ADDR},
{.str = "symoff", .field = PERF_OUTPUT_SYMOFFSET},
{.str = "srcline", .field = PERF_OUTPUT_SRCLINE},
@@ -574,6 +576,9 @@ static void set_print_ip_opts(struct perf_event_attr *attr)
if (PRINT_FIELD(DSO))
output[type].print_ip_opts |= EVSEL__PRINT_DSO;
+ if (PRINT_FIELD(DSOFF))
+ output[type].print_ip_opts |= EVSEL__PRINT_DSOFF;
+
if (PRINT_FIELD(SYMOFFSET))
output[type].print_ip_opts |= EVSEL__PRINT_SYMOFFSET;
@@ -627,6 +632,10 @@ static int perf_session__check_output_opt(struct perf_session *session)
if (evsel == NULL)
continue;
+ /* 'dsoff' implies 'dso' field */
+ if (output[j].fields & PERF_OUTPUT_DSOFF)
+ output[j].fields |= PERF_OUTPUT_DSO;
+
set_print_ip_opts(&evsel->core.attr);
tod |= output[j].fields & PERF_OUTPUT_TOD;
}
@@ -929,18 +938,12 @@ static int perf_sample__fprintf_brstack(struct perf_sample *sample,
}
printed += fprintf(fp, " 0x%"PRIx64, from);
- if (PRINT_FIELD(DSO)) {
- printed += fprintf(fp, "(");
- printed += map__fprintf_dsoname(alf.map, fp);
- printed += fprintf(fp, ")");
- }
+ if (PRINT_FIELD(DSO))
+ printed += map__fprintf_dsoname_dsoff(alf.map, PRINT_FIELD(DSOFF), alf.addr, fp);
printed += fprintf(fp, "/0x%"PRIx64, to);
- if (PRINT_FIELD(DSO)) {
- printed += fprintf(fp, "(");
- printed += map__fprintf_dsoname(alt.map, fp);
- printed += fprintf(fp, ")");
- }
+ if (PRINT_FIELD(DSO))
+ printed += map__fprintf_dsoname_dsoff(alt.map, PRINT_FIELD(DSOFF), alt.addr, fp);
printed += print_bstack_flags(fp, entries + i);
}
@@ -972,18 +975,12 @@ static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
thread__find_symbol_fb(thread, sample->cpumode, to, &alt);
printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
- if (PRINT_FIELD(DSO)) {
- printed += fprintf(fp, "(");
- printed += map__fprintf_dsoname(alf.map, fp);
- printed += fprintf(fp, ")");
- }
+ if (PRINT_FIELD(DSO))
+ printed += map__fprintf_dsoname_dsoff(alf.map, PRINT_FIELD(DSOFF), alf.addr, fp);
printed += fprintf(fp, "%c", '/');
printed += symbol__fprintf_symname_offs(alt.sym, &alt, fp);
- if (PRINT_FIELD(DSO)) {
- printed += fprintf(fp, "(");
- printed += map__fprintf_dsoname(alt.map, fp);
- printed += fprintf(fp, ")");
- }
+ if (PRINT_FIELD(DSO))
+ printed += map__fprintf_dsoname_dsoff(alt.map, PRINT_FIELD(DSOFF), alt.addr, fp);
printed += print_bstack_flags(fp, entries + i);
}
@@ -1019,17 +1016,11 @@ static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
to = map__dso_map_ip(alt.map, to);
printed += fprintf(fp, " 0x%"PRIx64, from);
- if (PRINT_FIELD(DSO)) {
- printed += fprintf(fp, "(");
- printed += map__fprintf_dsoname(alf.map, fp);
- printed += fprintf(fp, ")");
- }
+ if (PRINT_FIELD(DSO))
+ printed += map__fprintf_dsoname_dsoff(alf.map, PRINT_FIELD(DSOFF), alf.addr, fp);
printed += fprintf(fp, "/0x%"PRIx64, to);
- if (PRINT_FIELD(DSO)) {
- printed += fprintf(fp, "(");
- printed += map__fprintf_dsoname(alt.map, fp);
- printed += fprintf(fp, ")");
- }
+ if (PRINT_FIELD(DSO))
+ printed += map__fprintf_dsoname_dsoff(alt.map, PRINT_FIELD(DSOFF), alt.addr, fp);
printed += print_bstack_flags(fp, entries + i);
}
@@ -1393,11 +1384,8 @@ static int perf_sample__fprintf_addr(struct perf_sample *sample,
printed += symbol__fprintf_symname(al.sym, fp);
}
- if (PRINT_FIELD(DSO)) {
- printed += fprintf(fp, " (");
- printed += map__fprintf_dsoname(al.map, fp);
- printed += fprintf(fp, ")");
- }
+ if (PRINT_FIELD(DSO))
+ printed += map__fprintf_dsoname_dsoff(al.map, PRINT_FIELD(DSOFF), al.addr, fp);
out:
return printed;
}
@@ -3876,7 +3864,7 @@ int cmd_script(int argc, const char **argv)
"comma separated output fields prepend with 'type:'. "
"+field to add and -field to remove."
"Valid types: hw,sw,trace,raw,synth. "
- "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
+ "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,dsoff"
"addr,symoff,srcline,period,iregs,uregs,brstack,"
"brstacksym,flags,data_src,weight,bpf-output,brstackinsn,"
"brstackinsnlen,brstackoff,callindent,insn,insnlen,synth,"
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index a86614599269..046fbfcfdaab 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -67,6 +67,39 @@ char dso__symtab_origin(const struct dso *dso)
return origin[dso->symtab_type];
}
+bool dso__is_object_file(const struct dso *dso)
+{
+ switch (dso->binary_type) {
+ case DSO_BINARY_TYPE__KALLSYMS:
+ case DSO_BINARY_TYPE__GUEST_KALLSYMS:
+ case DSO_BINARY_TYPE__JAVA_JIT:
+ case DSO_BINARY_TYPE__BPF_PROG_INFO:
+ case DSO_BINARY_TYPE__BPF_IMAGE:
+ case DSO_BINARY_TYPE__OOL:
+ return false;
+ case DSO_BINARY_TYPE__VMLINUX:
+ case DSO_BINARY_TYPE__GUEST_VMLINUX:
+ case DSO_BINARY_TYPE__DEBUGLINK:
+ case DSO_BINARY_TYPE__BUILD_ID_CACHE:
+ case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
+ case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
+ case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
+ case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
+ case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
+ case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
+ case DSO_BINARY_TYPE__GUEST_KMODULE:
+ case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
+ case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
+ case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
+ case DSO_BINARY_TYPE__KCORE:
+ case DSO_BINARY_TYPE__GUEST_KCORE:
+ case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
+ case DSO_BINARY_TYPE__NOT_FOUND:
+ default:
+ return true;
+ }
+}
+
int dso__read_binary_type_filename(const struct dso *dso,
enum dso_binary_type type,
char *root_dir, char *filename, size_t size)
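Note: dso__is_object_file() separates DSOs backed by a real on-disk object (vmlinux, kernel modules, regular ELF binaries, debuginfo) from synthetic symbol sources (kallsyms, JIT maps, BPF programs and images, out-of-line trampolines). A file-relative "+0x..." offset is only meaningful for the former, which is how the map-printing code later in this patch uses it. A sketch of that intended use; it assumes perf's struct dso from util/dso.h and is not standalone:

    #include <stdbool.h>
    #include "dso.h"   /* perf-internal header; sketch only */

    /* Only print a "+0x..." offset when the mapping is backed by an
     * object file, so kallsyms/JIT/BPF entries don't get a bogus one. */
    static bool want_dso_offset(const struct dso *dso, bool print_off)
    {
            return print_off && dso && dso__is_object_file(dso);
    }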
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 0b7c7633b9f6..b23a157c914d 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -379,23 +379,25 @@ void dso__reset_find_symbol_cache(struct dso *dso);
size_t dso__fprintf_symbols_by_name(struct dso *dso, FILE *fp);
size_t dso__fprintf(struct dso *dso, FILE *fp);
-static inline bool dso__is_vmlinux(struct dso *dso)
+static inline bool dso__is_vmlinux(const struct dso *dso)
{
return dso->binary_type == DSO_BINARY_TYPE__VMLINUX ||
dso->binary_type == DSO_BINARY_TYPE__GUEST_VMLINUX;
}
-static inline bool dso__is_kcore(struct dso *dso)
+static inline bool dso__is_kcore(const struct dso *dso)
{
return dso->binary_type == DSO_BINARY_TYPE__KCORE ||
dso->binary_type == DSO_BINARY_TYPE__GUEST_KCORE;
}
-static inline bool dso__is_kallsyms(struct dso *dso)
+static inline bool dso__is_kallsyms(const struct dso *dso)
{
return dso->kernel && dso->long_name[0] != '/';
}
+bool dso__is_object_file(const struct dso *dso);
+
void dso__free_a2l(struct dso *dso);
enum dso_type dso__type(struct dso *dso, struct machine *machine);
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index cc80ec554c0a..79e42d66f55b 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -116,6 +116,7 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
int print_ip = print_opts & EVSEL__PRINT_IP;
int print_sym = print_opts & EVSEL__PRINT_SYM;
int print_dso = print_opts & EVSEL__PRINT_DSO;
+ int print_dsoff = print_opts & EVSEL__PRINT_DSOFF;
int print_symoffset = print_opts & EVSEL__PRINT_SYMOFFSET;
int print_oneline = print_opts & EVSEL__PRINT_ONELINE;
int print_srcline = print_opts & EVSEL__PRINT_SRCLINE;
@@ -171,11 +172,8 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
}
}
- if (print_dso && (!sym || !sym->inlined)) {
- printed += fprintf(fp, " (");
- printed += map__fprintf_dsoname(map, fp);
- printed += fprintf(fp, ")");
- }
+ if (print_dso && (!sym || !sym->inlined))
+ printed += map__fprintf_dsoname_dsoff(map, print_dsoff, addr, fp);
if (print_srcline)
printed += map__fprintf_srcline(map, addr, "\n ", fp);
@@ -209,6 +207,7 @@ int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
int print_ip = print_opts & EVSEL__PRINT_IP;
int print_sym = print_opts & EVSEL__PRINT_SYM;
int print_dso = print_opts & EVSEL__PRINT_DSO;
+ int print_dsoff = print_opts & EVSEL__PRINT_DSOFF;
int print_symoffset = print_opts & EVSEL__PRINT_SYMOFFSET;
int print_srcline = print_opts & EVSEL__PRINT_SRCLINE;
int print_unknown_as_addr = print_opts & EVSEL__PRINT_UNKNOWN_AS_ADDR;
@@ -234,11 +233,8 @@ int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
}
}
- if (print_dso) {
- printed += fprintf(fp, " (");
- printed += map__fprintf_dsoname(al->map, fp);
- printed += fprintf(fp, ")");
- }
+ if (print_dso)
+ printed += map__fprintf_dsoname_dsoff(al->map, print_dsoff, al->addr, fp);
if (print_srcline)
printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
diff --git a/tools/perf/util/evsel_fprintf.h b/tools/perf/util/evsel_fprintf.h
index 3093d096c29f..c8a9fac2f2dd 100644
--- a/tools/perf/util/evsel_fprintf.h
+++ b/tools/perf/util/evsel_fprintf.h
@@ -26,6 +26,7 @@ int evsel__fprintf(struct evsel *evsel, struct perf_attr_details *details, FILE
#define EVSEL__PRINT_UNKNOWN_AS_ADDR (1<<6)
#define EVSEL__PRINT_CALLCHAIN_ARROW (1<<7)
#define EVSEL__PRINT_SKIP_IGNORED (1<<8)
+#define EVSEL__PRINT_DSOFF (1<<9)
struct addr_location;
struct perf_event_attr;
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index b7f890950909..4d9944bbf5e4 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -431,14 +431,21 @@ size_t map__fprintf(struct map *map, FILE *fp)
map__start(map), map__end(map), map__pgoff(map), dso->name);
}
-size_t map__fprintf_dsoname(struct map *map, FILE *fp)
+static bool prefer_dso_long_name(const struct dso *dso, bool print_off)
+{
+ return dso->long_name &&
+ (symbol_conf.show_kernel_path ||
+ (print_off && (dso->name[0] == '[' || dso__is_kcore(dso))));
+}
+
+static size_t __map__fprintf_dsoname(struct map *map, bool print_off, FILE *fp)
{
char buf[symbol_conf.pad_output_len_dso + 1];
const char *dsoname = "[unknown]";
const struct dso *dso = map ? map__dso(map) : NULL;
if (dso) {
- if (symbol_conf.show_kernel_path && dso->long_name)
+ if (prefer_dso_long_name(dso, print_off))
dsoname = dso->long_name;
else
dsoname = dso->name;
@@ -452,6 +459,27 @@ size_t map__fprintf_dsoname(struct map *map, FILE *fp)
return fprintf(fp, "%s", dsoname);
}
+size_t map__fprintf_dsoname(struct map *map, FILE *fp)
+{
+ return __map__fprintf_dsoname(map, false, fp);
+}
+
+size_t map__fprintf_dsoname_dsoff(struct map *map, bool print_off, u64 addr, FILE *fp)
+{
+ const struct dso *dso = map ? map__dso(map) : NULL;
+ int printed = 0;
+
+ if (print_off && (!dso || !dso__is_object_file(dso)))
+ print_off = false;
+ printed += fprintf(fp, " (");
+ printed += __map__fprintf_dsoname(map, print_off, fp);
+ if (print_off)
+ printed += fprintf(fp, "+0x%" PRIx64, addr);
+ printed += fprintf(fp, ")");
+
+ return printed;
+}
+
char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
{
if (map == NULL)
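Note: the net effect of map__fprintf_dsoname_dsoff() on the output is that the DSO annotation grows from " (libc.so.6)" to " (libc.so.6+0x9a0b7)" when the offset is requested and meaningful. A standalone illustration of the exact formatting; the DSO name and address are made up:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            const char *dsoname = "libc.so.6";
            uint64_t addr = 0x9a0b7;
            int print_off = 1;

            /* mirrors the fprintf sequence in the helper above */
            printf(" (%s", dsoname);
            if (print_off)
                    printf("+0x%" PRIx64, addr);
            printf(")\n");   /* -> " (libc.so.6+0x9a0b7)" */
            return 0;
    }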
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 823ab7fc0acf..66a87b3d9965 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -194,6 +194,7 @@ static inline void __map__zput(struct map **map)
size_t map__fprintf(struct map *map, FILE *fp);
size_t map__fprintf_dsoname(struct map *map, FILE *fp);
+size_t map__fprintf_dsoname_dsoff(struct map *map, bool print_off, u64 addr, FILE *fp);
char *map__srcline(struct map *map, u64 addr, struct symbol *sym);
int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
FILE *fp);
diff --git a/tools/power/cpupower/lib/powercap.c b/tools/power/cpupower/lib/powercap.c
index 0ce29ee4c2e4..a7a59c6bacda 100644
--- a/tools/power/cpupower/lib/powercap.c
+++ b/tools/power/cpupower/lib/powercap.c
@@ -40,25 +40,34 @@ static int sysfs_get_enabled(char *path, int *mode)
{
int fd;
char yes_no;
+ int ret = 0;
*mode = 0;
fd = open(path, O_RDONLY);
- if (fd == -1)
- return -1;
+ if (fd == -1) {
+ ret = -1;
+ goto out;
+ }
if (read(fd, &yes_no, 1) != 1) {
- close(fd);
- return -1;
+ ret = -1;
+ goto out_close;
}
if (yes_no == '1') {
*mode = 1;
- return 0;
+ goto out_close;
} else if (yes_no == '0') {
- return 0;
+ goto out_close;
+ } else {
+ ret = -1;
+ goto out_close;
}
- return -1;
+out_close:
+ close(fd);
+out:
+ return ret;
}
int powercap_get_enabled(int *mode)
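Note: the sysfs_get_enabled() rewrite is a classic single-exit cleanup: every path on which the file descriptor was opened now funnels through one close(), so no early return can leak it. A compact standalone sketch of the same pattern; the helper name is invented for the example:

    #include <fcntl.h>
    #include <unistd.h>

    /* Read a '0'/'1' sysfs flag; returns -1 on any error. One close()
     * covers every path on which the open() succeeded. */
    static int read_bool_attr(const char *path, int *mode)
    {
            int ret = 0;
            char c;
            int fd = open(path, O_RDONLY);

            if (fd == -1)
                    return -1;
            if (read(fd, &c, 1) != 1 || (c != '0' && c != '1'))
                    ret = -1;
            else
                    *mode = (c == '1');
            close(fd);
            return ret;
    }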
diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
index e7d48cb563c0..ae6af354a81d 100644
--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
@@ -70,8 +70,8 @@ static int max_freq_mode;
*/
static unsigned long max_frequency;
-static unsigned long long tsc_at_measure_start;
-static unsigned long long tsc_at_measure_end;
+static unsigned long long *tsc_at_measure_start;
+static unsigned long long *tsc_at_measure_end;
static unsigned long long *mperf_previous_count;
static unsigned long long *aperf_previous_count;
static unsigned long long *mperf_current_count;
@@ -169,7 +169,7 @@ static int mperf_get_count_percent(unsigned int id, double *percent,
aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu];
if (max_freq_mode == MAX_FREQ_TSC_REF) {
- tsc_diff = tsc_at_measure_end - tsc_at_measure_start;
+ tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
*percent = 100.0 * mperf_diff / tsc_diff;
dprint("%s: TSC Ref - mperf_diff: %llu, tsc_diff: %llu\n",
mperf_cstates[id].name, mperf_diff, tsc_diff);
@@ -206,7 +206,7 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
if (max_freq_mode == MAX_FREQ_TSC_REF) {
/* Calculate max_freq from TSC count */
- tsc_diff = tsc_at_measure_end - tsc_at_measure_start;
+ tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
time_diff = timespec_diff_us(time_start, time_end);
max_frequency = tsc_diff / time_diff;
}
@@ -225,33 +225,27 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
static int mperf_start(void)
{
int cpu;
- unsigned long long dbg;
clock_gettime(CLOCK_REALTIME, &time_start);
- mperf_get_tsc(&tsc_at_measure_start);
- for (cpu = 0; cpu < cpu_count; cpu++)
+ for (cpu = 0; cpu < cpu_count; cpu++) {
+ mperf_get_tsc(&tsc_at_measure_start[cpu]);
mperf_init_stats(cpu);
+ }
- mperf_get_tsc(&dbg);
- dprint("TSC diff: %llu\n", dbg - tsc_at_measure_start);
return 0;
}
static int mperf_stop(void)
{
- unsigned long long dbg;
int cpu;
- for (cpu = 0; cpu < cpu_count; cpu++)
+ for (cpu = 0; cpu < cpu_count; cpu++) {
mperf_measure_stats(cpu);
+ mperf_get_tsc(&tsc_at_measure_end[cpu]);
+ }
- mperf_get_tsc(&tsc_at_measure_end);
clock_gettime(CLOCK_REALTIME, &time_end);
-
- mperf_get_tsc(&dbg);
- dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end);
-
return 0;
}
@@ -353,7 +347,8 @@ struct cpuidle_monitor *mperf_register(void)
aperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
mperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
aperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
-
+ tsc_at_measure_start = calloc(cpu_count, sizeof(unsigned long long));
+ tsc_at_measure_end = calloc(cpu_count, sizeof(unsigned long long));
mperf_monitor.name_len = strlen(mperf_monitor.name);
return &mperf_monitor;
}
@@ -364,6 +359,8 @@ void mperf_unregister(void)
free(aperf_previous_count);
free(mperf_current_count);
free(aperf_current_count);
+ free(tsc_at_measure_start);
+ free(tsc_at_measure_end);
free(is_valid);
}
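Note: moving tsc_at_measure_start/end from single globals to per-CPU arrays means each CPU's mperf delta is divided by a TSC delta taken around that same CPU's measurements, rather than by one process-wide window. A toy standalone illustration of the per-CPU C0 computation used in mperf_get_count_percent(); the counter values are fabricated:

    #include <stdio.h>

    static unsigned long long tsc_start[2]  = { 1000, 1000 };
    static unsigned long long tsc_end[2]    = { 6000, 11000 };
    static unsigned long long mperf_prev[2] = { 0, 0 };
    static unsigned long long mperf_cur[2]  = { 2500, 10000 };

    int main(void)
    {
            for (int cpu = 0; cpu < 2; cpu++) {
                    unsigned long long tsc_diff = tsc_end[cpu] - tsc_start[cpu];
                    unsigned long long mperf_diff = mperf_cur[cpu] - mperf_prev[cpu];

                    /* C0: unhalted reference cycles over this CPU's own window */
                    printf("cpu%d C0: %.1f%%\n", cpu,
                           100.0 * mperf_diff / tsc_diff);
            }
            return 0;
    }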
diff --git a/tools/testing/radix-tree/linux/init.h b/tools/testing/radix-tree/linux/init.h
index 1bb0afc21309..81563c3dfce7 100644
--- a/tools/testing/radix-tree/linux/init.h
+++ b/tools/testing/radix-tree/linux/init.h
@@ -1 +1,2 @@
#define __init
+#define __exit
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
index 9286d3baa12d..03539d86cdf0 100644
--- a/tools/testing/radix-tree/maple.c
+++ b/tools/testing/radix-tree/maple.c
@@ -14,6 +14,7 @@
#include "test.h"
#include <stdlib.h>
#include <time.h>
+#include "linux/init.h"
#define module_init(x)
#define module_exit(x)
@@ -22,7 +23,6 @@
#define dump_stack() assert(0)
#include "../../../lib/maple_tree.c"
-#undef CONFIG_DEBUG_MAPLE_TREE
#include "../../../lib/test_maple_tree.c"
#define RCU_RANGE_COUNT 1000
@@ -81,7 +81,7 @@ static void check_mas_alloc_node_count(struct ma_state *mas)
* check_new_node() - Check the creation of new nodes and error path
* verification.
*/
-static noinline void check_new_node(struct maple_tree *mt)
+static noinline void __init check_new_node(struct maple_tree *mt)
{
struct maple_node *mn, *mn2, *mn3;
@@ -455,7 +455,7 @@ static noinline void check_new_node(struct maple_tree *mt)
/*
* Check erasing including RCU.
*/
-static noinline void check_erase(struct maple_tree *mt, unsigned long index,
+static noinline void __init check_erase(struct maple_tree *mt, unsigned long index,
void *ptr)
{
MT_BUG_ON(mt, mtree_test_erase(mt, index) != ptr);
@@ -465,24 +465,24 @@ static noinline void check_erase(struct maple_tree *mt, unsigned long index,
#define erase_check_insert(mt, i) check_insert(mt, set[i], entry[i%2])
#define erase_check_erase(mt, i) check_erase(mt, set[i], entry[i%2])
-static noinline void check_erase_testset(struct maple_tree *mt)
+static noinline void __init check_erase_testset(struct maple_tree *mt)
{
- unsigned long set[] = { 5015, 5014, 5017, 25, 1000,
- 1001, 1002, 1003, 1005, 0,
- 6003, 6002, 6008, 6012, 6015,
- 7003, 7002, 7008, 7012, 7015,
- 8003, 8002, 8008, 8012, 8015,
- 9003, 9002, 9008, 9012, 9015,
- 10003, 10002, 10008, 10012, 10015,
- 11003, 11002, 11008, 11012, 11015,
- 12003, 12002, 12008, 12012, 12015,
- 13003, 13002, 13008, 13012, 13015,
- 14003, 14002, 14008, 14012, 14015,
- 15003, 15002, 15008, 15012, 15015,
- };
-
-
- void *ptr = &set;
+ static const unsigned long set[] = { 5015, 5014, 5017, 25, 1000,
+ 1001, 1002, 1003, 1005, 0,
+ 6003, 6002, 6008, 6012, 6015,
+ 7003, 7002, 7008, 7012, 7015,
+ 8003, 8002, 8008, 8012, 8015,
+ 9003, 9002, 9008, 9012, 9015,
+ 10003, 10002, 10008, 10012, 10015,
+ 11003, 11002, 11008, 11012, 11015,
+ 12003, 12002, 12008, 12012, 12015,
+ 13003, 13002, 13008, 13012, 13015,
+ 14003, 14002, 14008, 14012, 14015,
+ 15003, 15002, 15008, 15012, 15015,
+ };
+
+
+ void *ptr = &check_erase_testset;
void *entry[2] = { ptr, mt };
void *root_node;
@@ -739,7 +739,7 @@ static noinline void check_erase_testset(struct maple_tree *mt)
int mas_ce2_over_count(struct ma_state *mas_start, struct ma_state *mas_end,
void *s_entry, unsigned long s_min,
void *e_entry, unsigned long e_max,
- unsigned long *set, int i, bool null_entry)
+ const unsigned long *set, int i, bool null_entry)
{
int count = 0, span = 0;
unsigned long retry = 0;
@@ -969,8 +969,8 @@ retry:
}
#if defined(CONFIG_64BIT)
-static noinline void check_erase2_testset(struct maple_tree *mt,
- unsigned long *set, unsigned long size)
+static noinline void __init check_erase2_testset(struct maple_tree *mt,
+ const unsigned long *set, unsigned long size)
{
int entry_count = 0;
int check = 0;
@@ -1054,7 +1054,7 @@ static noinline void check_erase2_testset(struct maple_tree *mt,
if (entry_count)
MT_BUG_ON(mt, !mt_height(mt));
#if check_erase2_debug > 1
- mt_dump(mt);
+ mt_dump(mt, mt_dump_hex);
#endif
#if check_erase2_debug
pr_err("Done\n");
@@ -1085,7 +1085,7 @@ static noinline void check_erase2_testset(struct maple_tree *mt,
mas_for_each(&mas, foo, ULONG_MAX) {
if (xa_is_zero(foo)) {
if (addr == mas.index) {
- mt_dump(mas.tree);
+ mt_dump(mas.tree, mt_dump_hex);
pr_err("retry failed %lu - %lu\n",
mas.index, mas.last);
MT_BUG_ON(mt, 1);
@@ -1114,11 +1114,11 @@ static noinline void check_erase2_testset(struct maple_tree *mt,
/* These tests were pulled from KVM tree modifications which failed. */
-static noinline void check_erase2_sets(struct maple_tree *mt)
+static noinline void __init check_erase2_sets(struct maple_tree *mt)
{
void *entry;
unsigned long start = 0;
- unsigned long set[] = {
+ static const unsigned long set[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140721266458624, 140737488351231,
ERASE, 140721266458624, 140737488351231,
@@ -1136,7 +1136,7 @@ ERASE, 140253902692352, 140253902864383,
STORE, 140253902692352, 140253902696447,
STORE, 140253902696448, 140253902864383,
};
- unsigned long set2[] = {
+ static const unsigned long set2[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140735933583360, 140737488351231,
ERASE, 140735933583360, 140737488351231,
@@ -1160,7 +1160,7 @@ STORE, 140277094813696, 140277094821887,
STORE, 140277094821888, 140277094825983,
STORE, 140735933906944, 140735933911039,
};
- unsigned long set3[] = {
+ static const unsigned long set3[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140735790264320, 140737488351231,
ERASE, 140735790264320, 140737488351231,
@@ -1203,7 +1203,7 @@ STORE, 47135835840512, 47135835885567,
STORE, 47135835885568, 47135835893759,
};
- unsigned long set4[] = {
+ static const unsigned long set4[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140728251703296, 140737488351231,
ERASE, 140728251703296, 140737488351231,
@@ -1224,7 +1224,7 @@ ERASE, 47646523277312, 47646523445247,
STORE, 47646523277312, 47646523400191,
};
- unsigned long set5[] = {
+ static const unsigned long set5[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140726874062848, 140737488351231,
ERASE, 140726874062848, 140737488351231,
@@ -1357,7 +1357,7 @@ STORE, 47884791619584, 47884791623679,
STORE, 47884791623680, 47884791627775,
};
- unsigned long set6[] = {
+ static const unsigned long set6[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140722999021568, 140737488351231,
ERASE, 140722999021568, 140737488351231,
@@ -1489,7 +1489,7 @@ ERASE, 47430432014336, 47430432022527,
STORE, 47430432014336, 47430432018431,
STORE, 47430432018432, 47430432022527,
};
- unsigned long set7[] = {
+ static const unsigned long set7[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140729808330752, 140737488351231,
ERASE, 140729808330752, 140737488351231,
@@ -1621,7 +1621,7 @@ ERASE, 47439987130368, 47439987138559,
STORE, 47439987130368, 47439987134463,
STORE, 47439987134464, 47439987138559,
};
- unsigned long set8[] = {
+ static const unsigned long set8[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140722482974720, 140737488351231,
ERASE, 140722482974720, 140737488351231,
@@ -1754,7 +1754,7 @@ STORE, 47708488638464, 47708488642559,
STORE, 47708488642560, 47708488646655,
};
- unsigned long set9[] = {
+ static const unsigned long set9[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140736427839488, 140737488351231,
ERASE, 140736427839488, 140736427839488,
@@ -5620,7 +5620,7 @@ ERASE, 47906195480576, 47906195480576,
STORE, 94641242615808, 94641242750975,
};
- unsigned long set10[] = {
+ static const unsigned long set10[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140736427839488, 140737488351231,
ERASE, 140736427839488, 140736427839488,
@@ -9484,7 +9484,7 @@ STORE, 139726599680000, 139726599684095,
ERASE, 47906195480576, 47906195480576,
STORE, 94641242615808, 94641242750975,
};
- unsigned long set11[] = {
+ static const unsigned long set11[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140732658499584, 140737488351231,
ERASE, 140732658499584, 140732658499584,
@@ -9510,7 +9510,7 @@ STORE, 140732658565120, 140732658569215,
STORE, 140732658552832, 140732658565119,
};
- unsigned long set12[] = { /* contains 12 values. */
+ static const unsigned long set12[] = { /* contains 12 values. */
STORE, 140737488347136, 140737488351231,
STORE, 140732658499584, 140737488351231,
ERASE, 140732658499584, 140732658499584,
@@ -9537,7 +9537,7 @@ STORE, 140732658552832, 140732658565119,
STORE, 140014592741375, 140014592741375, /* contrived */
STORE, 140014592733184, 140014592741376, /* creates first entry retry. */
};
- unsigned long set13[] = {
+ static const unsigned long set13[] = {
STORE, 140373516247040, 140373516251135,/*: ffffa2e7b0e10d80 */
STORE, 140373516251136, 140373516255231,/*: ffffa2e7b1195d80 */
STORE, 140373516255232, 140373516443647,/*: ffffa2e7b0e109c0 */
@@ -9550,7 +9550,7 @@ STORE, 140373518684160, 140373518688254,/*: ffffa2e7b05fec00 */
STORE, 140373518688256, 140373518692351,/*: ffffa2e7bfbdcd80 */
STORE, 140373518692352, 140373518696447,/*: ffffa2e7b0749e40 */
};
- unsigned long set14[] = {
+ static const unsigned long set14[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140731667996672, 140737488351231,
SNULL, 140731668000767, 140737488351231,
@@ -9834,7 +9834,7 @@ SNULL, 139826136543232, 139826136809471,
STORE, 139826136809472, 139826136842239,
STORE, 139826136543232, 139826136809471,
};
- unsigned long set15[] = {
+ static const unsigned long set15[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140722061451264, 140737488351231,
SNULL, 140722061455359, 140737488351231,
@@ -10119,7 +10119,7 @@ STORE, 139906808958976, 139906808991743,
STORE, 139906808692736, 139906808958975,
};
- unsigned long set16[] = {
+ static const unsigned long set16[] = {
STORE, 94174808662016, 94174809321471,
STORE, 94174811414528, 94174811426815,
STORE, 94174811426816, 94174811430911,
@@ -10330,7 +10330,7 @@ STORE, 139921865613312, 139921865617407,
STORE, 139921865547776, 139921865564159,
};
- unsigned long set17[] = {
+ static const unsigned long set17[] = {
STORE, 94397057224704, 94397057646591,
STORE, 94397057650688, 94397057691647,
STORE, 94397057691648, 94397057695743,
@@ -10392,7 +10392,7 @@ STORE, 140720477511680, 140720477646847,
STORE, 140720478302208, 140720478314495,
STORE, 140720478314496, 140720478318591,
};
- unsigned long set18[] = {
+ static const unsigned long set18[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140724953673728, 140737488351231,
SNULL, 140724953677823, 140737488351231,
@@ -10425,7 +10425,7 @@ STORE, 140222970597376, 140222970605567,
ERASE, 140222970597376, 140222970605567,
STORE, 140222970597376, 140222970605567,
};
- unsigned long set19[] = {
+ static const unsigned long set19[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140725182459904, 140737488351231,
SNULL, 140725182463999, 140737488351231,
@@ -10694,7 +10694,7 @@ STORE, 140656836775936, 140656836780031,
STORE, 140656787476480, 140656791920639,
ERASE, 140656774639616, 140656779083775,
};
- unsigned long set20[] = {
+ static const unsigned long set20[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140735952392192, 140737488351231,
SNULL, 140735952396287, 140737488351231,
@@ -10850,7 +10850,7 @@ STORE, 140590386819072, 140590386823167,
STORE, 140590386823168, 140590386827263,
SNULL, 140590376591359, 140590376595455,
};
- unsigned long set21[] = {
+ static const unsigned long set21[] = {
STORE, 93874710941696, 93874711363583,
STORE, 93874711367680, 93874711408639,
STORE, 93874711408640, 93874711412735,
@@ -10920,7 +10920,7 @@ ERASE, 140708393312256, 140708393316351,
ERASE, 140708393308160, 140708393312255,
ERASE, 140708393291776, 140708393308159,
};
- unsigned long set22[] = {
+ static const unsigned long set22[] = {
STORE, 93951397134336, 93951397183487,
STORE, 93951397183488, 93951397728255,
STORE, 93951397728256, 93951397826559,
@@ -11047,7 +11047,7 @@ STORE, 140551361253376, 140551361519615,
ERASE, 140551361253376, 140551361519615,
};
- unsigned long set23[] = {
+ static const unsigned long set23[] = {
STORE, 94014447943680, 94014448156671,
STORE, 94014450253824, 94014450257919,
STORE, 94014450257920, 94014450266111,
@@ -14371,7 +14371,7 @@ SNULL, 140175956627455, 140175985139711,
STORE, 140175927242752, 140175956627455,
STORE, 140175956627456, 140175985139711,
};
- unsigned long set24[] = {
+ static const unsigned long set24[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140735281639424, 140737488351231,
SNULL, 140735281643519, 140737488351231,
@@ -15533,7 +15533,7 @@ ERASE, 139635393024000, 139635401412607,
ERASE, 139635384627200, 139635384631295,
ERASE, 139635384631296, 139635393019903,
};
- unsigned long set25[] = {
+ static const unsigned long set25[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140737488343040, 140737488351231,
STORE, 140722547441664, 140737488351231,
@@ -22321,7 +22321,7 @@ STORE, 140249652703232, 140249682087935,
STORE, 140249682087936, 140249710600191,
};
- unsigned long set26[] = {
+ static const unsigned long set26[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140729464770560, 140737488351231,
SNULL, 140729464774655, 140737488351231,
@@ -22345,7 +22345,7 @@ ERASE, 140109040951296, 140109040959487,
STORE, 140109040955392, 140109040959487,
ERASE, 140109040955392, 140109040959487,
};
- unsigned long set27[] = {
+ static const unsigned long set27[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140726128070656, 140737488351231,
SNULL, 140726128074751, 140737488351231,
@@ -22741,7 +22741,7 @@ STORE, 140415509696512, 140415535910911,
ERASE, 140415537422336, 140415562588159,
STORE, 140415482433536, 140415509696511,
};
- unsigned long set28[] = {
+ static const unsigned long set28[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140722475622400, 140737488351231,
SNULL, 140722475626495, 140737488351231,
@@ -22809,7 +22809,7 @@ STORE, 139918413348864, 139918413352959,
ERASE, 139918413316096, 139918413344767,
STORE, 93865848528896, 93865848664063,
};
- unsigned long set29[] = {
+ static const unsigned long set29[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140734467944448, 140737488351231,
SNULL, 140734467948543, 140737488351231,
@@ -23684,7 +23684,7 @@ ERASE, 140143079972864, 140143088361471,
ERASE, 140143205793792, 140143205797887,
ERASE, 140143205797888, 140143214186495,
};
- unsigned long set30[] = {
+ static const unsigned long set30[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140733436743680, 140737488351231,
SNULL, 140733436747775, 140737488351231,
@@ -24566,7 +24566,7 @@ ERASE, 140165225893888, 140165225897983,
ERASE, 140165225897984, 140165234286591,
ERASE, 140165058105344, 140165058109439,
};
- unsigned long set31[] = {
+ static const unsigned long set31[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140730890784768, 140737488351231,
SNULL, 140730890788863, 140737488351231,
@@ -25379,7 +25379,7 @@ ERASE, 140623906590720, 140623914979327,
ERASE, 140622950277120, 140622950281215,
ERASE, 140622950281216, 140622958669823,
};
- unsigned long set32[] = {
+ static const unsigned long set32[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140731244212224, 140737488351231,
SNULL, 140731244216319, 140737488351231,
@@ -26175,7 +26175,7 @@ ERASE, 140400417288192, 140400425676799,
ERASE, 140400283066368, 140400283070463,
ERASE, 140400283070464, 140400291459071,
};
- unsigned long set33[] = {
+ static const unsigned long set33[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140734562918400, 140737488351231,
SNULL, 140734562922495, 140737488351231,
@@ -26317,7 +26317,7 @@ STORE, 140582961786880, 140583003750399,
ERASE, 140582961786880, 140583003750399,
};
- unsigned long set34[] = {
+ static const unsigned long set34[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140731327180800, 140737488351231,
SNULL, 140731327184895, 140737488351231,
@@ -27198,7 +27198,7 @@ ERASE, 140012522094592, 140012530483199,
ERASE, 140012033142784, 140012033146879,
ERASE, 140012033146880, 140012041535487,
};
- unsigned long set35[] = {
+ static const unsigned long set35[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140730536939520, 140737488351231,
SNULL, 140730536943615, 140737488351231,
@@ -27955,7 +27955,7 @@ ERASE, 140474471936000, 140474480324607,
ERASE, 140474396430336, 140474396434431,
ERASE, 140474396434432, 140474404823039,
};
- unsigned long set36[] = {
+ static const unsigned long set36[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140723893125120, 140737488351231,
SNULL, 140723893129215, 140737488351231,
@@ -28816,7 +28816,7 @@ ERASE, 140121890357248, 140121898745855,
ERASE, 140121269587968, 140121269592063,
ERASE, 140121269592064, 140121277980671,
};
- unsigned long set37[] = {
+ static const unsigned long set37[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140722404016128, 140737488351231,
SNULL, 140722404020223, 140737488351231,
@@ -28942,7 +28942,7 @@ STORE, 139759821246464, 139759888355327,
ERASE, 139759821246464, 139759888355327,
ERASE, 139759888355328, 139759955464191,
};
- unsigned long set38[] = {
+ static const unsigned long set38[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140730666221568, 140737488351231,
SNULL, 140730666225663, 140737488351231,
@@ -29752,7 +29752,7 @@ ERASE, 140613504712704, 140613504716799,
ERASE, 140613504716800, 140613513105407,
};
- unsigned long set39[] = {
+ static const unsigned long set39[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140736271417344, 140737488351231,
SNULL, 140736271421439, 140737488351231,
@@ -30124,7 +30124,7 @@ STORE, 140325364428800, 140325372821503,
STORE, 140325356036096, 140325364428799,
SNULL, 140325364432895, 140325372821503,
};
- unsigned long set40[] = {
+ static const unsigned long set40[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140734309167104, 140737488351231,
SNULL, 140734309171199, 140737488351231,
@@ -30875,7 +30875,7 @@ ERASE, 140320289300480, 140320289304575,
ERASE, 140320289304576, 140320297693183,
ERASE, 140320163409920, 140320163414015,
};
- unsigned long set41[] = {
+ static const unsigned long set41[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140728157171712, 140737488351231,
SNULL, 140728157175807, 140737488351231,
@@ -31185,7 +31185,7 @@ STORE, 94376135090176, 94376135094271,
STORE, 94376135094272, 94376135098367,
SNULL, 94376135094272, 94377208836095,
};
- unsigned long set42[] = {
+ static const unsigned long set42[] = {
STORE, 314572800, 1388314623,
STORE, 1462157312, 1462169599,
STORE, 1462169600, 1462185983,
@@ -33862,7 +33862,7 @@ SNULL, 3798999040, 3799101439,
*/
};
- unsigned long set43[] = {
+ static const unsigned long set43[] = {
STORE, 140737488347136, 140737488351231,
STORE, 140734187720704, 140737488351231,
SNULL, 140734187724800, 140737488351231,
@@ -34513,7 +34513,7 @@ static void *rcu_reader_rev(void *ptr)
if (mas.index != r_start) {
alt = xa_mk_value(index + i * 2 + 1 +
RCU_RANGE_COUNT);
- mt_dump(test->mt);
+ mt_dump(test->mt, mt_dump_dec);
printk("Error: %lu-%lu %p != %lu-%lu %p %p line %d i %d\n",
mas.index, mas.last, entry,
r_start, r_end, expected, alt,
@@ -34996,7 +34996,7 @@ void run_check_rcu_slowread(struct maple_tree *mt, struct rcu_test_struct *vals)
MT_BUG_ON(mt, !vals->seen_entry3);
MT_BUG_ON(mt, !vals->seen_both);
}
-static noinline void check_rcu_simulated(struct maple_tree *mt)
+static noinline void __init check_rcu_simulated(struct maple_tree *mt)
{
unsigned long i, nr_entries = 1000;
unsigned long target = 4320;
@@ -35157,7 +35157,7 @@ static noinline void check_rcu_simulated(struct maple_tree *mt)
rcu_unregister_thread();
}
-static noinline void check_rcu_threaded(struct maple_tree *mt)
+static noinline void __init check_rcu_threaded(struct maple_tree *mt)
{
unsigned long i, nr_entries = 1000;
struct rcu_test_struct vals;
@@ -35259,6 +35259,7 @@ static void mas_dfs_preorder(struct ma_state *mas)
struct maple_enode *prev;
unsigned char end, slot = 0;
+ unsigned long *pivots;
if (mas->node == MAS_START) {
mas_start(mas);
@@ -35291,6 +35292,9 @@ walk_up:
mas_ascend(mas);
goto walk_up;
}
+ pivots = ma_pivots(mte_to_node(prev), mte_node_type(prev));
+ mas->max = mas_safe_pivot(mas, pivots, slot, mte_node_type(prev));
+ mas->min = mas_safe_min(mas, pivots, slot);
return;
done:
@@ -35366,7 +35370,7 @@ static void check_dfs_preorder(struct maple_tree *mt)
/* End of depth first search tests */
/* Preallocation testing */
-static noinline void check_prealloc(struct maple_tree *mt)
+static noinline void __init check_prealloc(struct maple_tree *mt)
{
unsigned long i, max = 100;
unsigned long allocated;
@@ -35494,7 +35498,7 @@ static noinline void check_prealloc(struct maple_tree *mt)
/* End of preallocation testing */
/* Spanning writes, writes that span nodes and layers of the tree */
-static noinline void check_spanning_write(struct maple_tree *mt)
+static noinline void __init check_spanning_write(struct maple_tree *mt)
{
unsigned long i, max = 5000;
MA_STATE(mas, mt, 1200, 2380);
@@ -35662,7 +35666,7 @@ static noinline void check_spanning_write(struct maple_tree *mt)
/* End of spanning write testing */
/* Writes to a NULL area that are adjacent to other NULLs */
-static noinline void check_null_expand(struct maple_tree *mt)
+static noinline void __init check_null_expand(struct maple_tree *mt)
{
unsigned long i, max = 100;
unsigned char data_end;
@@ -35723,7 +35727,7 @@ static noinline void check_null_expand(struct maple_tree *mt)
/* End of NULL area expansions */
/* Checking for no memory is best done outside the kernel */
-static noinline void check_nomem(struct maple_tree *mt)
+static noinline void __init check_nomem(struct maple_tree *mt)
{
MA_STATE(ms, mt, 1, 1);
@@ -35758,7 +35762,7 @@ static noinline void check_nomem(struct maple_tree *mt)
mtree_destroy(mt);
}
-static noinline void check_locky(struct maple_tree *mt)
+static noinline void __init check_locky(struct maple_tree *mt)
{
MA_STATE(ms, mt, 2, 2);
MA_STATE(reader, mt, 2, 2);
@@ -35780,10 +35784,10 @@ void farmer_tests(void)
struct maple_node *node;
DEFINE_MTREE(tree);
- mt_dump(&tree);
+ mt_dump(&tree, mt_dump_dec);
tree.ma_root = xa_mk_value(0);
- mt_dump(&tree);
+ mt_dump(&tree, mt_dump_dec);
node = mt_alloc_one(GFP_KERNEL);
node->parent = (void *)((unsigned long)(&tree) | 1);
@@ -35793,7 +35797,7 @@ void farmer_tests(void)
node->mr64.pivot[1] = 1;
node->mr64.pivot[2] = 0;
tree.ma_root = mt_mk_node(node, maple_leaf_64);
- mt_dump(&tree);
+ mt_dump(&tree, mt_dump_dec);
node->parent = ma_parent_ptr(node);
ma_free_rcu(node);
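Note: the radix-tree/maple userspace harness compiles kernel sources (lib/maple_tree.c, lib/test_maple_tree.c) directly, so annotations like __init, now applied to the test functions above, must expand to nothing; that is all the linux/init.h stub does. A standalone sketch of the shim at work:

    #include <stdio.h>

    /* kernel: places code in a discardable section; harness: empty */
    #define __init
    #define __exit

    static void __init check_something(void)
    {
            puts("__init expands away; this is an ordinary function here");
    }

    int main(void)
    {
            check_something();
            return 0;
    }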
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 90a62cf75008..5d6fc3f39284 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -4,6 +4,7 @@ TARGETS += amd-pstate
TARGETS += arm64
TARGETS += bpf
TARGETS += breakpoints
+TARGETS += cachestat
TARGETS += capabilities
TARGETS += cgroup
TARGETS += clone3
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
index 0a6837f97c32..cd42e2825bd2 100644
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
@@ -1,33 +1,6 @@
-bloom_filter_map # libbpf: prog 'check_bloom': failed to attach: ERROR: strerror_r(-524)=22
-bpf_cookie/lsm
-bpf_cookie/multi_kprobe_attach_api
-bpf_cookie/multi_kprobe_link_api
-bpf_cookie/trampoline
-bpf_loop/check_callback_fn_stop # link unexpected error: -524
-bpf_loop/check_invalid_flags
-bpf_loop/check_nested_calls
-bpf_loop/check_non_constant_callback
-bpf_loop/check_nr_loops
-bpf_loop/check_null_callback_ctx
-bpf_loop/check_stack
-bpf_mod_race # bpf_mod_kfunc_race__attach unexpected error: -524 (errno 524)
-bpf_tcp_ca/dctcp_fallback
-btf_dump/btf_dump: var_data # find type id unexpected find type id: actual -2 < expected 0
-cgroup_hierarchical_stats # attach unexpected error: -524 (errno 524)
-d_path/basic # setup attach failed: -524
-deny_namespace # attach unexpected error: -524 (errno 524)
-fentry_fexit # fentry_attach unexpected error: -1 (errno 524)
-fentry_test # fentry_attach unexpected error: -1 (errno 524)
-fexit_sleep # fexit_attach fexit attach failed: -1
-fexit_stress # fexit attach unexpected fexit attach: actual -524 < expected 0
-fexit_test # fexit_attach unexpected error: -1 (errno 524)
-get_func_args_test # get_func_args_test__attach unexpected error: -524 (errno 524) (trampoline)
-get_func_ip_test # get_func_ip_test__attach unexpected error: -524 (errno 524) (trampoline)
-htab_update/reenter_update
-kfree_skb # attach fentry unexpected error: -524 (trampoline)
-kfunc_call/subprog # extern (var ksym) 'bpf_prog_active': not found in kernel BTF
-kfunc_call/subprog_lskel # skel unexpected error: -2
-kfunc_dynptr_param/dynptr_data_null # libbpf: prog 'dynptr_data_null': failed to attach: ERROR: strerror_r(-524)=22
+bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
+bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
+fexit_sleep # The test never returns. The remaining tests cannot start.
kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
kprobe_multi_test/attach_api_addrs # bpf_program__attach_kprobe_multi_opts unexpected error: -95
kprobe_multi_test/attach_api_pattern # bpf_program__attach_kprobe_multi_opts unexpected error: -95
@@ -35,51 +8,6 @@ kprobe_multi_test/attach_api_syms # bpf_program__attach_kprobe_mu
kprobe_multi_test/bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
kprobe_multi_test/link_api_addrs # link_fd unexpected link_fd: actual -95 < expected 0
kprobe_multi_test/link_api_syms # link_fd unexpected link_fd: actual -95 < expected 0
-kprobe_multi_test/skel_api # kprobe_multi__attach unexpected error: -524 (errno 524)
-ksyms_module/libbpf # 'bpf_testmod_ksym_percpu': not found in kernel BTF
-ksyms_module/lskel # test_ksyms_module_lskel__open_and_load unexpected error: -2
-libbpf_get_fd_by_id_opts # test_libbpf_get_fd_by_id_opts__attach unexpected error: -524 (errno 524)
-linked_list
-lookup_key # test_lookup_key__attach unexpected error: -524 (errno 524)
-lru_bug # lru_bug__attach unexpected error: -524 (errno 524)
-modify_return # modify_return__attach failed unexpected error: -524 (errno 524)
-module_attach # skel_attach skeleton attach failed: -524
-module_fentry_shadow # bpf_link_create unexpected bpf_link_create: actual -524 < expected 0
-mptcp/base # run_test mptcp unexpected error: -524 (errno 524)
-netcnt # packets unexpected packets: actual 10001 != expected 10000
-rcu_read_lock # failed to attach: ERROR: strerror_r(-524)=22
-recursion # skel_attach unexpected error: -524 (errno 524)
-ringbuf # skel_attach skeleton attachment failed: -1
-setget_sockopt # attach_cgroup unexpected error: -524
-sk_storage_tracing # test_sk_storage_tracing__attach unexpected error: -524 (errno 524)
-skc_to_unix_sock # could not attach BPF object unexpected error: -524 (errno 524)
-socket_cookie # prog_attach unexpected error: -524
-stacktrace_build_id # compare_stack_ips stackmap vs. stack_amap err -1 errno 2
-task_local_storage/exit_creds # skel_attach unexpected error: -524 (errno 524)
-task_local_storage/recursion # skel_attach unexpected error: -524 (errno 524)
-test_bprm_opts # attach attach failed: -524
-test_ima # attach attach failed: -524
-test_local_storage # attach lsm attach failed: -524
-test_lsm # test_lsm_first_attach unexpected error: -524 (errno 524)
-test_overhead # attach_fentry unexpected error: -524
-timer # timer unexpected error: -524 (errno 524)
-timer_crash # timer_crash__attach unexpected error: -524 (errno 524)
-timer_mim # timer_mim unexpected error: -524 (errno 524)
-trace_printk # trace_printk__attach unexpected error: -1 (errno 524)
-trace_vprintk # trace_vprintk__attach unexpected error: -1 (errno 524)
+kprobe_multi_test/skel_api # libbpf: failed to load BPF skeleton 'kprobe_multi': -3
+module_attach # prog 'kprobe_multi': failed to auto-attach: -95
tracing_struct # tracing_struct__attach unexpected error: -524 (errno 524)
-trampoline_count # attach_prog unexpected error: -524
-unpriv_bpf_disabled # skel_attach unexpected error: -524 (errno 524)
-user_ringbuf/test_user_ringbuf_post_misaligned # misaligned_skel unexpected error: -524 (errno 524)
-user_ringbuf/test_user_ringbuf_post_producer_wrong_offset
-user_ringbuf/test_user_ringbuf_post_larger_than_ringbuf_sz
-user_ringbuf/test_user_ringbuf_basic # ringbuf_basic_skel unexpected error: -524 (errno 524)
-user_ringbuf/test_user_ringbuf_sample_full_ring_buffer
-user_ringbuf/test_user_ringbuf_post_alignment_autoadjust
-user_ringbuf/test_user_ringbuf_overfill
-user_ringbuf/test_user_ringbuf_discards_properly_ignored
-user_ringbuf/test_user_ringbuf_loop
-user_ringbuf/test_user_ringbuf_msg_protocol
-user_ringbuf/test_user_ringbuf_blocking_reserve
-verify_pkcs7_sig # test_verify_pkcs7_sig__attach unexpected error: -524 (errno 524)
-vmlinux # skel_attach skeleton attach failed: -524
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index c7463f3ec3c0..5061d9e24c16 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -26,3 +26,4 @@ user_ringbuf # failed to find kernel BTF type ID of
verif_stats # trace_vprintk__open_and_load unexpected error: -9 (?)
xdp_bonding # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
xdp_metadata # JIT does not support calling kernel function (kfunc)
+test_task_under_cgroup # JIT does not support calling kernel function (kfunc)
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index c49e5403ad0e..28d2c77262be 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -197,7 +197,7 @@ $(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_r
$(OUTPUT)/sign-file: ../../../../scripts/sign-file.c
$(call msg,SIGN-FILE,,$@)
- $(Q)$(CC) $(shell $(HOSTPKG_CONFIG)--cflags libcrypto 2> /dev/null) \
+ $(Q)$(CC) $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null) \
$< -o $@ \
$(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto)
diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h
index 8c993ec8ceea..f3c41f8902a0 100644
--- a/tools/testing/selftests/bpf/bpf_kfuncs.h
+++ b/tools/testing/selftests/bpf/bpf_kfuncs.h
@@ -35,4 +35,10 @@ extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset,
extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u32 offset,
void *buffer, __u32 buffer__szk) __ksym;
+extern int bpf_dynptr_adjust(const struct bpf_dynptr *ptr, __u32 start, __u32 end) __ksym;
+extern int bpf_dynptr_is_null(const struct bpf_dynptr *ptr) __ksym;
+extern int bpf_dynptr_is_rdonly(const struct bpf_dynptr *ptr) __ksym;
+extern __u32 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym;
+extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clone__init) __ksym;
+
#endif
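Note: a hedged usage sketch for the dynptr kfuncs declared above, mirroring the selftests' ringbuf setup (the map name, sizes, and tracepoint are illustrative). bpf_dynptr_adjust(ptr, start, end) narrows the dynptr to the byte range [start, end) of its current view, so a 64-byte reservation adjusted to [8, 24) reports a size of 16:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>
    #include "bpf_kfuncs.h"

    struct {
            __uint(type, BPF_MAP_TYPE_RINGBUF);
            __uint(max_entries, 4096);
    } ringbuf SEC(".maps");

    char _license[] SEC("license") = "GPL";

    SEC("tp/syscalls/sys_enter_nanosleep")
    int adjust_demo(void *ctx)
    {
            struct bpf_dynptr ptr;

            if (bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr))
                    goto out;

            /* keep bytes [8, 24) of the 64-byte view: size becomes 16 */
            if (!bpf_dynptr_adjust(&ptr, 8, 24) &&
                !bpf_dynptr_is_null(&ptr) &&
                bpf_dynptr_size(&ptr) == 16)
                    bpf_printk("adjusted to 16 bytes");
    out:
            bpf_ringbuf_discard_dynptr(&ptr, 0);
            return 0;
    }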
diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
index b17bfa0e0aac..bb143de68875 100644
--- a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
+++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
@@ -96,12 +96,80 @@ static void test_parse_test_list(void)
goto error;
ASSERT_OK(strcmp("*bpf_cookie*", set.tests[0].name), "test name");
ASSERT_OK(strcmp("*trace*", set.tests[0].subtests[0]), "subtest name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("t/subtest1,t/subtest2", &set, true),
+ "parsing");
+ if (!ASSERT_EQ(set.cnt, 1, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ if (!ASSERT_EQ(set.tests[0].subtest_cnt, 2, "subtest filters count"))
+ goto error;
+ ASSERT_OK(strcmp("t", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("subtest1", set.tests[0].subtests[0]), "subtest name");
+ ASSERT_OK(strcmp("subtest2", set.tests[0].subtests[1]), "subtest name");
error:
free_test_filter_set(&set);
}
+static void test_parse_test_list_file(void)
+{
+ struct test_filter_set set;
+ char tmpfile[80];
+ FILE *fp;
+ int fd;
+
+ snprintf(tmpfile, sizeof(tmpfile), "/tmp/bpf_arg_parsing_test.XXXXXX");
+ fd = mkstemp(tmpfile);
+ if (!ASSERT_GE(fd, 0, "create tmp"))
+ return;
+
+ fp = fdopen(fd, "w");
+ if (!ASSERT_NEQ(fp, NULL, "fdopen tmp")) {
+ close(fd);
+ goto out_remove;
+ }
+
+ fprintf(fp, "# comment\n");
+ fprintf(fp, " test_with_spaces \n");
+ fprintf(fp, "testA/subtest # comment\n");
+ fprintf(fp, "testB#comment with no space\n");
+ fprintf(fp, "testB # duplicate\n");
+ fprintf(fp, "testA/subtest # subtest duplicate\n");
+ fprintf(fp, "testA/subtest2\n");
+ fprintf(fp, "testC_no_eof_newline");
+ fflush(fp);
+
+ if (!ASSERT_OK(ferror(fp), "prepare tmp"))
+ goto out_fclose;
+
+ init_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file");
+
+ ASSERT_EQ(set.cnt, 4, "test count");
+ ASSERT_OK(strcmp("test_with_spaces", set.tests[0].name), "test 0 name");
+ ASSERT_EQ(set.tests[0].subtest_cnt, 0, "test 0 subtest count");
+ ASSERT_OK(strcmp("testA", set.tests[1].name), "test 1 name");
+ ASSERT_EQ(set.tests[1].subtest_cnt, 2, "test 1 subtest count");
+ ASSERT_OK(strcmp("subtest", set.tests[1].subtests[0]), "test 1 subtest 0");
+ ASSERT_OK(strcmp("subtest2", set.tests[1].subtests[1]), "test 1 subtest 1");
+ ASSERT_OK(strcmp("testB", set.tests[2].name), "test 2 name");
+ ASSERT_OK(strcmp("testC_no_eof_newline", set.tests[3].name), "test 3 name");
+
+ free_test_filter_set(&set);
+
+out_fclose:
+ fclose(fp);
+out_remove:
+ remove(tmpfile);
+}
+
void test_arg_parsing(void)
{
if (test__start_subtest("test_parse_test_list"))
test_parse_test_list();
+ if (test__start_subtest("test_parse_test_list_file"))
+ test_parse_test_list_file();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c
index d176c34a7d2e..7cfac53c0d58 100644
--- a/tools/testing/selftests/bpf/prog_tests/dynptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c
@@ -20,6 +20,14 @@ static struct {
{"test_ringbuf", SETUP_SYSCALL_SLEEP},
{"test_skb_readonly", SETUP_SKB_PROG},
{"test_dynptr_skb_data", SETUP_SKB_PROG},
+ {"test_adjust", SETUP_SYSCALL_SLEEP},
+ {"test_adjust_err", SETUP_SYSCALL_SLEEP},
+ {"test_zero_size_dynptr", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_is_null", SETUP_SYSCALL_SLEEP},
+ {"test_dynptr_is_rdonly", SETUP_SKB_PROG},
+ {"test_dynptr_clone", SETUP_SKB_PROG},
+ {"test_dynptr_skb_no_buff", SETUP_SKB_PROG},
+ {"test_dynptr_skb_strcmp", SETUP_SKB_PROG},
};
static void verify_success(const char *prog_name, enum test_setup_type setup_type)
diff --git a/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c b/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c
new file mode 100644
index 000000000000..4224727fb364
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Bytedance */
+
+#include <sys/syscall.h>
+#include <test_progs.h>
+#include <cgroup_helpers.h>
+#include "test_task_under_cgroup.skel.h"
+
+#define FOO "/foo"
+
+void test_task_under_cgroup(void)
+{
+ struct test_task_under_cgroup *skel;
+ int ret, foo;
+ pid_t pid;
+
+ foo = test__join_cgroup(FOO);
+ if (!ASSERT_OK(foo < 0, "cgroup_join_foo"))
+ return;
+
+ skel = test_task_under_cgroup__open();
+ if (!ASSERT_OK_PTR(skel, "test_task_under_cgroup__open"))
+ goto cleanup;
+
+ skel->rodata->local_pid = getpid();
+ skel->bss->remote_pid = getpid();
+ skel->rodata->cgid = get_cgroup_id(FOO);
+
+ ret = test_task_under_cgroup__load(skel);
+ if (!ASSERT_OK(ret, "test_task_under_cgroup__load"))
+ goto cleanup;
+
+ ret = test_task_under_cgroup__attach(skel);
+ if (!ASSERT_OK(ret, "test_task_under_cgroup__attach"))
+ goto cleanup;
+
+ pid = fork();
+ if (pid == 0)
+ exit(0);
+
+ ret = (pid == -1);
+ if (ASSERT_OK(ret, "fork process"))
+ wait(NULL);
+
+ test_task_under_cgroup__detach(skel);
+
+ ASSERT_NEQ(skel->bss->remote_pid, skel->rodata->local_pid,
+ "test task_under_cgroup");
+
+cleanup:
+ test_task_under_cgroup__destroy(skel);
+ close(foo);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index 2497716ee379..531621adef42 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -55,6 +55,7 @@
#include "verifier_spill_fill.skel.h"
#include "verifier_spin_lock.skel.h"
#include "verifier_stack_ptr.skel.h"
+#include "verifier_subprog_precision.skel.h"
#include "verifier_subreg.skel.h"
#include "verifier_uninit.skel.h"
#include "verifier_unpriv.skel.h"
@@ -154,6 +155,7 @@ void test_verifier_sock(void) { RUN(verifier_sock); }
void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); }
void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
+void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); }
void test_verifier_subreg(void) { RUN(verifier_subreg); }
void test_verifier_uninit(void) { RUN(verifier_uninit); }
void test_verifier_unpriv(void) { RUN(verifier_unpriv); }
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
index d3c1217ba79a..38a57a2e70db 100644
--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -86,6 +86,10 @@
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
+#ifndef __used
+#define __used __attribute__((used))
+#endif
+
#if defined(__TARGET_ARCH_x86)
#define SYSCALL_WRAPPER 1
#define SYS_PREFIX "__x64_"
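Note: the __used fallback above is presumably needed by the new subprog-precision tests, where static subprograms are reached only through inline asm and would otherwise be discarded by the compiler before the verifier ever sees them; __attribute__((used)) pins them in the object. A sketch of the pattern, assuming the __naked helper that bpf_misc.h also provides:

    /* Referenced only from asm elsewhere in the object, so without
     * __used the compiler could drop it as dead code. */
    static __used __naked int subprog(void)
    {
            asm volatile ("r0 = 0;"
                          "exit;");
    }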
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index 759eb5c245cd..c2f0e18af951 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -1378,3 +1378,310 @@ int invalid_slice_rdwr_rdonly(struct __sk_buff *skb)
return 0;
}
+
+/* bpf_dynptr_adjust can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #1")
+int dynptr_adjust_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_adjust(&ptr, 1, 2);
+
+ return 0;
+}
+
+/* bpf_dynptr_is_null can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #1")
+int dynptr_is_null_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_is_null(&ptr);
+
+ return 0;
+}
+
+/* bpf_dynptr_is_rdonly can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #1")
+int dynptr_is_rdonly_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_is_rdonly(&ptr);
+
+ return 0;
+}
+
+/* bpf_dynptr_size can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #1")
+int dynptr_size_invalid(void *ctx)
+{
+ struct bpf_dynptr ptr;
+
+ /* this should fail */
+ bpf_dynptr_size(&ptr);
+
+ return 0;
+}
+
+/* Only initialized dynptrs can be cloned */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #1")
+int clone_invalid1(void *ctx)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+
+ /* this should fail */
+ bpf_dynptr_clone(&ptr1, &ptr2);
+
+ return 0;
+}
+
+/* Can't overwrite an existing dynptr when cloning */
+SEC("?xdp")
+__failure __msg("cannot overwrite referenced dynptr")
+int clone_invalid2(struct xdp_md *xdp)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr clone;
+
+ bpf_dynptr_from_xdp(xdp, 0, &ptr1);
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &clone);
+
+ /* this should fail */
+ bpf_dynptr_clone(&ptr1, &clone);
+
+ bpf_ringbuf_submit_dynptr(&clone, 0);
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate its clones */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #3")
+int clone_invalidate1(void *ctx)
+{
+ struct bpf_dynptr clone;
+ struct bpf_dynptr ptr;
+ char read_data[64];
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &clone, 0, 0);
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate its parent */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #3")
+int clone_invalidate2(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone;
+ char read_data[64];
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ bpf_ringbuf_submit_dynptr(&clone, 0);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate its siblings */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #3")
+int clone_invalidate3(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone1;
+ struct bpf_dynptr clone2;
+ char read_data[64];
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone1);
+
+ bpf_dynptr_clone(&ptr, &clone2);
+
+ bpf_ringbuf_submit_dynptr(&clone2, 0);
+
+ /* this should fail */
+ bpf_dynptr_read(read_data, sizeof(read_data), &clone1, 0, 0);
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate any data slices
+ * of its clones
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_invalidate4(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone;
+ int *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+ data = bpf_dynptr_data(&clone, 0, sizeof(val));
+ if (!data)
+ return 0;
+
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate any data slices
+ * of its parent
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_invalidate5(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone;
+ int *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+ data = bpf_dynptr_data(&ptr, 0, sizeof(val));
+ if (!data)
+ return 0;
+
+ bpf_dynptr_clone(&ptr, &clone);
+
+ bpf_ringbuf_submit_dynptr(&clone, 0);
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* Invalidating a dynptr should invalidate any data slices
+ * of its sibling
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_invalidate6(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ struct bpf_dynptr clone1;
+ struct bpf_dynptr clone2;
+ int *data;
+
+ bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone1);
+
+ bpf_dynptr_clone(&ptr, &clone2);
+
+ data = bpf_dynptr_data(&clone1, 0, sizeof(val));
+ if (!data)
+ return 0;
+
+ bpf_ringbuf_submit_dynptr(&clone2, 0);
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* An skb clone's data slices should be invalid anytime packet data changes */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int clone_skb_packet_data(struct __sk_buff *skb)
+{
+ char buffer[sizeof(__u32)] = {};
+ struct bpf_dynptr clone;
+ struct bpf_dynptr ptr;
+ __u32 *data;
+
+ bpf_dynptr_from_skb(skb, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+ data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
+ if (!data)
+ return XDP_DROP;
+
+ if (bpf_skb_pull_data(skb, skb->len))
+ return SK_DROP;
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* An xdp clone's data slices should be invalid anytime packet data changes */
+SEC("?xdp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_xdp_packet_data(struct xdp_md *xdp)
+{
+ char buffer[sizeof(__u32)] = {};
+ struct bpf_dynptr clone;
+ struct bpf_dynptr ptr;
+ struct ethhdr *hdr;
+ __u32 *data;
+
+ bpf_dynptr_from_xdp(xdp, 0, &ptr);
+
+ bpf_dynptr_clone(&ptr, &clone);
+ data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
+ if (!data)
+ return XDP_DROP;
+
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
+ return XDP_DROP;
+
+ /* this should fail */
+ *data = 123;
+
+ return 0;
+}
+
+/* Buffers that are provided must be sufficiently long */
+SEC("?cgroup_skb/egress")
+__failure __msg("memory, len pair leads to invalid memory access")
+int test_dynptr_skb_small_buff(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ char buffer[8] = {};
+ __u64 *data;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+ /* the verifier must reject this: buffer is 8 bytes, 9 are requested */
+ data = bpf_dynptr_slice(&ptr, 0, buffer, 9);
+
+ return !!data;
+}
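
The failure cases above all enforce a single invariant: a ringbuf record may be released through exactly one dynptr of a clone family, and doing so invalidates the parent, every sibling, and all of their data slices. A minimal sketch of the pattern the verifier does accept, assuming the same ringbuf map and globals used by these tests:

    static int clone_then_submit_once(void *ctx)
    {
        struct bpf_dynptr ptr, clone;

        bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
        bpf_dynptr_clone(&ptr, &clone);

        /* reads and writes through either reference are fine here */

        bpf_ringbuf_submit_dynptr(&clone, 0);
        /* from this point on, both &ptr and &clone are unusable */
        return 0;
    }
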
diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c
index b2fa6c47ecc0..0c053976f8f9 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_success.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_success.c
@@ -207,3 +207,339 @@ int test_dynptr_skb_data(struct __sk_buff *skb)
return 1;
}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_adjust(void *ctx)
+{
+ struct bpf_dynptr ptr;
+ __u32 bytes = 64;
+ __u32 off = 10;
+ __u32 trim = 15;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ err = bpf_ringbuf_reserve_dynptr(&ringbuf, bytes, 0, &ptr);
+ if (err) {
+ err = 1;
+ goto done;
+ }
+
+ if (bpf_dynptr_size(&ptr) != bytes) {
+ err = 2;
+ goto done;
+ }
+
+ /* Advance the dynptr by off */
+ err = bpf_dynptr_adjust(&ptr, off, bpf_dynptr_size(&ptr));
+ if (err) {
+ err = 3;
+ goto done;
+ }
+
+ if (bpf_dynptr_size(&ptr) != bytes - off) {
+ err = 4;
+ goto done;
+ }
+
+ /* Trim the dynptr */
+ err = bpf_dynptr_adjust(&ptr, off, trim);
+ if (err) {
+ err = 5;
+ goto done;
+ }
+
+ /* Check that the size was adjusted correctly */
+ if (bpf_dynptr_size(&ptr) != trim - off) {
+ err = 6;
+ goto done;
+ }
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_adjust_err(void *ctx)
+{
+ char write_data[45] = "hello there, world!!";
+ struct bpf_dynptr ptr;
+ __u32 size = 64;
+ __u32 off = 20;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
+ err = 1;
+ goto done;
+ }
+
+ /* Check that start can't be greater than end */
+ if (bpf_dynptr_adjust(&ptr, 5, 1) != -EINVAL) {
+ err = 2;
+ goto done;
+ }
+
+ /* Check that start can't be greater than size */
+ if (bpf_dynptr_adjust(&ptr, size + 1, size + 1) != -ERANGE) {
+ err = 3;
+ goto done;
+ }
+
+ /* Check that end can't be greater than size */
+ if (bpf_dynptr_adjust(&ptr, 0, size + 1) != -ERANGE) {
+ err = 4;
+ goto done;
+ }
+
+ if (bpf_dynptr_adjust(&ptr, off, size)) {
+ err = 5;
+ goto done;
+ }
+
+ /* Check that you can't write more bytes than available into the dynptr
+ * after you've adjusted it
+ */
+ if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
+ err = 6;
+ goto done;
+ }
+
+ /* Check that even after adjusting, submitting/discarding
+ * a ringbuf dynptr works
+ */
+ bpf_ringbuf_submit_dynptr(&ptr, 0);
+ return 0;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_zero_size_dynptr(void *ctx)
+{
+ char write_data = 'x', read_data;
+ struct bpf_dynptr ptr;
+ __u32 size = 64;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) {
+ err = 1;
+ goto done;
+ }
+
+ /* After this, the dynptr has a size of 0 */
+ if (bpf_dynptr_adjust(&ptr, size, size)) {
+ err = 2;
+ goto done;
+ }
+
+ /* Test that reading + writing non-zero bytes is not ok */
+ if (bpf_dynptr_read(&read_data, sizeof(read_data), &ptr, 0, 0) != -E2BIG) {
+ err = 3;
+ goto done;
+ }
+
+ if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) {
+ err = 4;
+ goto done;
+ }
+
+ /* Test that reading + writing 0 bytes from a 0-size dynptr is ok */
+ if (bpf_dynptr_read(&read_data, 0, &ptr, 0, 0)) {
+ err = 5;
+ goto done;
+ }
+
+ if (bpf_dynptr_write(&ptr, 0, &write_data, 0, 0)) {
+ err = 6;
+ goto done;
+ }
+
+ err = 0;
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr, 0);
+ return 0;
+}
+
+SEC("tp/syscalls/sys_enter_nanosleep")
+int test_dynptr_is_null(void *ctx)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ __u64 size = 4;
+
+ if (bpf_get_current_pid_tgid() >> 32 != pid)
+ return 0;
+
+ /* Pass in invalid flags, get back an invalid dynptr */
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 123, &ptr1) != -EINVAL) {
+ err = 1;
+ goto exit_early;
+ }
+
+ /* Test that the invalid dynptr is null */
+ if (!bpf_dynptr_is_null(&ptr1)) {
+ err = 2;
+ goto exit_early;
+ }
+
+ /* Get a valid dynptr */
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr2)) {
+ err = 3;
+ goto exit;
+ }
+
+ /* Test that the valid dynptr is not null */
+ if (bpf_dynptr_is_null(&ptr2)) {
+ err = 4;
+ goto exit;
+ }
+
+exit:
+ bpf_ringbuf_discard_dynptr(&ptr2, 0);
+exit_early:
+ bpf_ringbuf_discard_dynptr(&ptr1, 0);
+ return 0;
+}
+
+SEC("cgroup_skb/egress")
+int test_dynptr_is_rdonly(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ struct bpf_dynptr ptr3;
+
+ /* Pass in invalid flags, get back an invalid dynptr */
+ if (bpf_dynptr_from_skb(skb, 123, &ptr1) != -EINVAL) {
+ err = 1;
+ return 0;
+ }
+
+ /* Test that an invalid dynptr is_rdonly returns false */
+ if (bpf_dynptr_is_rdonly(&ptr1)) {
+ err = 2;
+ return 0;
+ }
+
+ /* Get a read-only dynptr */
+ if (bpf_dynptr_from_skb(skb, 0, &ptr2)) {
+ err = 3;
+ return 0;
+ }
+
+ /* Test that the dynptr is read-only */
+ if (!bpf_dynptr_is_rdonly(&ptr2)) {
+ err = 4;
+ return 0;
+ }
+
+ /* Get a read-writeable dynptr */
+ if (bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr3)) {
+ err = 5;
+ goto done;
+ }
+
+ /* Test that the dynptr is not read-only */
+ if (bpf_dynptr_is_rdonly(&ptr3)) {
+ err = 6;
+ goto done;
+ }
+
+done:
+ bpf_ringbuf_discard_dynptr(&ptr3, 0);
+ return 0;
+}
+
+SEC("cgroup_skb/egress")
+int test_dynptr_clone(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr1;
+ struct bpf_dynptr ptr2;
+ __u32 off = 2, size;
+
+ /* Get a dynptr */
+ if (bpf_dynptr_from_skb(skb, 0, &ptr1)) {
+ err = 1;
+ return 0;
+ }
+
+ if (bpf_dynptr_adjust(&ptr1, off, bpf_dynptr_size(&ptr1))) {
+ err = 2;
+ return 0;
+ }
+
+ /* Clone the dynptr */
+ if (bpf_dynptr_clone(&ptr1, &ptr2)) {
+ err = 3;
+ return 0;
+ }
+
+ size = bpf_dynptr_size(&ptr1);
+
+ /* Check that the clone has the same size and read-only status */
+ if (bpf_dynptr_size(&ptr2) != size) {
+ err = 4;
+ return 0;
+ }
+
+ if (bpf_dynptr_is_rdonly(&ptr2) != bpf_dynptr_is_rdonly(&ptr1)) {
+ err = 5;
+ return 0;
+ }
+
+ /* Advance and trim the original dynptr */
+ bpf_dynptr_adjust(&ptr1, 5, 5);
+
+ /* Check that only original dynptr was affected, and the clone wasn't */
+ if (bpf_dynptr_size(&ptr2) != size) {
+ err = 6;
+ return 0;
+ }
+
+ return 0;
+}
+
+SEC("?cgroup_skb/egress")
+int test_dynptr_skb_no_buff(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ __u64 *data;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+ /* This may return NULL. SKB may require a buffer */
+ data = bpf_dynptr_slice(&ptr, 0, NULL, 1);
+
+ return !!data;
+}
+
+SEC("?cgroup_skb/egress")
+int test_dynptr_skb_strcmp(struct __sk_buff *skb)
+{
+ struct bpf_dynptr ptr;
+ char *data;
+
+ if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+ err = 1;
+ return 1;
+ }
+
+ /* This may return NULL. SKB may require a buffer */
+ data = bpf_dynptr_slice(&ptr, 0, NULL, 10);
+ if (data) {
+ bpf_strncmp(data, 10, "foo");
+ return 1;
+ }
+
+ return 1;
+}
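
test_adjust and test_adjust_err both rely on bpf_dynptr_adjust() interpreting its start/end arguments relative to the dynptr's current window, so adjustments compose and the view can only shrink. The arithmetic the tests check, sketched for the 64-byte reservation used above:

    bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);  /* window [0, 64),  size 64 */
    bpf_dynptr_adjust(&ptr, 10, bpf_dynptr_size(&ptr)); /* window [10, 64), size 54 */
    bpf_dynptr_adjust(&ptr, 10, 15);                    /* window [20, 25), size 5  */
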
diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c
index be16143ae292..6b9b3c56f009 100644
--- a/tools/testing/selftests/bpf/progs/iters.c
+++ b/tools/testing/selftests/bpf/progs/iters.c
@@ -651,29 +651,25 @@ int iter_stack_array_loop(const void *ctx)
return sum;
}
-#define ARR_SZ 16
-
-static __noinline void fill(struct bpf_iter_num *it, int *arr, int mul)
+static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul)
{
- int *t;
- __u64 i;
+ int *t, i;
while ((t = bpf_iter_num_next(it))) {
i = *t;
- if (i >= ARR_SZ)
+ if (i >= n)
break;
arr[i] = i * mul;
}
}
-static __noinline int sum(struct bpf_iter_num *it, int *arr)
+static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n)
{
- int *t, sum = 0;;
- __u64 i;
+ int *t, i, sum = 0;
while ((t = bpf_iter_num_next(it))) {
i = *t;
- if (i >= ARR_SZ)
+ if (i >= n)
break;
sum += arr[i];
}
@@ -685,7 +681,7 @@ SEC("raw_tp")
__success
int iter_pass_iter_ptr_to_subprog(const void *ctx)
{
- int arr1[ARR_SZ], arr2[ARR_SZ];
+ int arr1[16], arr2[32];
struct bpf_iter_num it;
int n, sum1, sum2;
@@ -694,25 +690,25 @@ int iter_pass_iter_ptr_to_subprog(const void *ctx)
/* fill arr1 */
n = ARRAY_SIZE(arr1);
bpf_iter_num_new(&it, 0, n);
- fill(&it, arr1, 2);
+ fill(&it, arr1, n, 2);
bpf_iter_num_destroy(&it);
/* fill arr2 */
n = ARRAY_SIZE(arr2);
bpf_iter_num_new(&it, 0, n);
- fill(&it, arr2, 10);
+ fill(&it, arr2, n, 10);
bpf_iter_num_destroy(&it);
/* sum arr1 */
n = ARRAY_SIZE(arr1);
bpf_iter_num_new(&it, 0, n);
- sum1 = sum(&it, arr1);
+ sum1 = sum(&it, arr1, n);
bpf_iter_num_destroy(&it);
/* sum arr2 */
n = ARRAY_SIZE(arr2);
bpf_iter_num_new(&it, 0, n);
- sum2 = sum(&it, arr2);
+ sum2 = sum(&it, arr2, n);
bpf_iter_num_destroy(&it);
bpf_printk("sum1=%d, sum2=%d", sum1, sum2);
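
Replacing the shared ARR_SZ bound with an explicit length argument lets fill() and sum() serve arrays of different sizes (16 and 32 above) while still giving the verifier a per-call bound on the index, since i >= n is checked before arr[i] is touched. The resulting call-site shape, restated from the hunk:

    n = ARRAY_SIZE(arr2);            /* 32 */
    bpf_iter_num_new(&it, 0, n);
    fill(&it, arr2, n, 10);          /* arr2[i] guarded by i < n */
    bpf_iter_num_destroy(&it);
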
diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c
index b85fc8c423ba..17a9f59bf5f3 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func1.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func1.c
@@ -10,6 +10,8 @@
static __attribute__ ((noinline))
int f0(int var, struct __sk_buff *skb)
{
+ asm volatile ("");
+
return skb->len;
}
diff --git a/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
new file mode 100644
index 000000000000..56cdc0a553f0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Bytedance */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
+void bpf_task_release(struct task_struct *p) __ksym;
+
+const volatile int local_pid;
+const volatile __u64 cgid;
+int remote_pid;
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(handle__task_newtask, struct task_struct *task, u64 clone_flags)
+{
+ struct cgroup *cgrp = NULL;
+ struct task_struct *acquired;
+
+ if (local_pid != (bpf_get_current_pid_tgid() >> 32))
+ return 0;
+
+ acquired = bpf_task_acquire(task);
+ if (!acquired)
+ return 0;
+
+ if (local_pid == acquired->tgid)
+ goto out;
+
+ cgrp = bpf_cgroup_from_id(cgid);
+ if (!cgrp)
+ goto out;
+
+ if (bpf_task_under_cgroup(acquired, cgrp))
+ remote_pid = acquired->tgid;
+
+out:
+ if (cgrp)
+ bpf_cgroup_release(cgrp);
+ bpf_task_release(acquired);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
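
A sketch of the userspace half that would drive this program; the skeleton field names are assumptions derived from the rodata/bss variables declared above, not part of this hunk:

    skel->rodata->local_pid = getpid();
    skel->rodata->cgid = target_cgroup_id;   /* id of the cgroup under test */
    /* load + attach, fork a child inside that cgroup, then expect: */
    ASSERT_EQ(skel->bss->remote_pid, child_pid, "remote_pid");
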
diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
new file mode 100644
index 000000000000..db6b3143338b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <errno.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
+
+int vals[] SEC(".data.vals") = {1, 2, 3, 4};
+
+__naked __noinline __used
+static unsigned long identity_subprog()
+{
+ /* the simplest *static* 64-bit identity function */
+ asm volatile (
+ "r0 = r1;"
+ "exit;"
+ );
+}
+
+__noinline __used
+unsigned long global_identity_subprog(__u64 x)
+{
+ /* the simplest *global* 64-bit identity function */
+ return x;
+}
+
+__naked __noinline __used
+static unsigned long callback_subprog()
+{
+ /* the simplest callback function */
+ asm volatile (
+ "r0 = 0;"
+ "exit;"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("7: (0f) r1 += r0")
+__msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4")
+__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
+__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1")
+__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5")
+__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
+__naked int subprog_result_precise(void)
+{
+ asm volatile (
+ "r6 = 3;"
+ /* pass r6 through r1 into subprog to get it back as r0;
+ * this whole chain will have to be marked as precise later
+ */
+ "r1 = r6;"
+ "call identity_subprog;"
+ /* now use subprog's returned value (which is a
+ * r6 -> r1 -> r0 chain), as index into vals array, forcing
+ * all of that to be known precisely
+ */
+ "r0 *= 4;"
+ "r1 = %[vals];"
+ /* here r0->r1->r6 chain is forced to be precise and has to be
+ * propagated back to the beginning, including through the
+ * subprog call
+ */
+ "r1 += r0;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("9: (0f) r1 += r0")
+__msg("mark_precise: frame0: last_idx 9 first_idx 0")
+__msg("mark_precise: frame0: regs=r0 stack= before 8: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r0 stack= before 7: (27) r0 *= 4")
+__msg("mark_precise: frame0: regs=r0 stack= before 5: (a5) if r0 < 0x4 goto pc+1")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (85) call pc+7")
+__naked int global_subprog_result_precise(void)
+{
+ asm volatile (
+ "r6 = 3;"
+ /* pass r6 through r1 into subprog to get it back as r0;
+ * given global_identity_subprog is global, precision won't
+ * propagate all the way back to r6
+ */
+ "r1 = r6;"
+ "call global_identity_subprog;"
+ /* now use subprog's returned value (which is unknown now, so
+ * we need to clamp it), as index into vals array, forcing r0
+ * to be marked precise (with no effect on r6, though)
+ */
+ "if r0 < %[vals_arr_sz] goto 1f;"
+ "r0 = %[vals_arr_sz] - 1;"
+ "1:"
+ "r0 *= 4;"
+ "r1 = %[vals];"
+ /* here r0 is forced to be precise and has to be
+ * propagated back to the global subprog call, but it
+ * shouldn't go all the way to mark r6 as precise
+ */
+ "r1 += r0;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals),
+ __imm_const(vals_arr_sz, ARRAY_SIZE(vals))
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("14: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 14 first_idx 10")
+__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4")
+__msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0")
+__msg("mark_precise: frame0: parent state regs=r0 stack=:")
+__msg("mark_precise: frame0: last_idx 18 first_idx 0")
+__msg("mark_precise: frame0: regs=r0 stack= before 18: (95) exit")
+__naked int callback_result_precise(void)
+{
+ asm volatile (
+ "r6 = 3;"
+
+ /* call subprog and use result; r0 shouldn't propagate back to
+ * callback_subprog
+ */
+ "r1 = r6;" /* nr_loops */
+ "r2 = %[callback_subprog];" /* callback_fn */
+ "r3 = 0;" /* callback_ctx */
+ "r4 = 0;" /* flags */
+ "call %[bpf_loop];"
+
+ "r6 = r0;"
+ "if r6 > 3 goto 1f;"
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the bpf_loop() call, but not beyond
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "1:"
+ "exit;"
+ :
+ : __imm_ptr(vals),
+ __imm_ptr(callback_subprog),
+ __imm(bpf_loop)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("7: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 7 first_idx 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 11: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 10: (bf) r0 = r1")
+__msg("mark_precise: frame1: regs= stack= before 4: (85) call pc+5")
+__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
+__naked int parent_callee_saved_reg_precise(void)
+{
+ asm volatile (
+ "r6 = 3;"
+
+ /* call subprog and ignore result; we need this call only to
+ * complicate jump history
+ */
+ "r1 = 0;"
+ "call identity_subprog;"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) subprog call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("7: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 7 first_idx 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 4: (85) call pc+5")
+__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
+__naked int parent_callee_saved_reg_precise_global(void)
+{
+ asm volatile (
+ "r6 = 3;"
+
+ /* call subprog and ignore result; we need this call only to
+ * complicate jump history
+ */
+ "r1 = 0;"
+ "call global_identity_subprog;"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) subprog call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("12: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 12 first_idx 10")
+__msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4")
+__msg("mark_precise: frame0: parent state regs=r6 stack=:")
+__msg("mark_precise: frame0: last_idx 16 first_idx 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 16: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
+__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop#181")
+__msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0")
+__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8")
+__msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
+__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
+__naked int parent_callee_saved_reg_precise_with_callback(void)
+{
+ asm volatile (
+ "r6 = 3;"
+
+ /* call subprog and ignore result; we need this call only to
+ * complicate jump history
+ */
+ "r1 = 1;" /* nr_loops */
+ "r2 = %[callback_subprog];" /* callback_fn */
+ "r3 = 0;" /* callback_ctx */
+ "r4 = 0;" /* flags */
+ "call %[bpf_loop];"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) callback call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals),
+ __imm_ptr(callback_subprog),
+ __imm(bpf_loop)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("9: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 9 first_idx 6")
+__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: parent state regs= stack=-8:")
+__msg("mark_precise: frame0: last_idx 13 first_idx 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 13: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 12: (bf) r0 = r1")
+__msg("mark_precise: frame1: regs= stack= before 5: (85) call pc+6")
+__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
+__naked int parent_stack_slot_precise(void)
+{
+ asm volatile (
+ /* spill reg */
+ "r6 = 3;"
+ "*(u64 *)(r10 - 8) = r6;"
+
+ /* call subprog and ignore result; we need this call only to
+ * complicate jump history
+ */
+ "r1 = 0;"
+ "call identity_subprog;"
+
+ /* restore reg from stack; in this case we'll be carrying
+ * stack mask when going back into subprog through jump
+ * history
+ */
+ "r6 = *(u64 *)(r10 - 8);"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) subprog call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("9: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 9 first_idx 6")
+__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: parent state regs= stack=-8:")
+__msg("mark_precise: frame0: last_idx 5 first_idx 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 5: (85) call pc+6")
+__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
+__naked int parent_stack_slot_precise_global(void)
+{
+ asm volatile (
+ /* spill reg */
+ "r6 = 3;"
+ "*(u64 *)(r10 - 8) = r6;"
+
+ /* call subprog and ignore result; we need this call only to
+ * complicate jump history
+ */
+ "r1 = 0;"
+ "call global_identity_subprog;"
+
+ /* restore reg from stack; in this case we'll be carrying
+ * stack mask when going back into subprog through jump
+ * history
+ */
+ "r6 = *(u64 *)(r10 - 8);"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) subprog call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("14: (0f) r1 += r6")
+__msg("mark_precise: frame0: last_idx 14 first_idx 11")
+__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
+__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
+__msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)")
+__msg("mark_precise: frame0: parent state regs= stack=-8:")
+__msg("mark_precise: frame0: last_idx 18 first_idx 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
+__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
+__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
+__msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0")
+__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8")
+__msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
+__naked int parent_stack_slot_precise_with_callback(void)
+{
+ asm volatile (
+ /* spill reg */
+ "r6 = 3;"
+ "*(u64 *)(r10 - 8) = r6;"
+
+ /* ensure we have callback frame in jump history */
+ "r1 = r6;" /* nr_loops */
+ "r2 = %[callback_subprog];" /* callback_fn */
+ "r3 = 0;" /* callback_ctx */
+ "r4 = 0;" /* flags */
+ "call %[bpf_loop];"
+
+ /* restore reg from stack; in this case we'll be carrying
+ * stack mask when going back into subprog through jump
+ * history
+ */
+ "r6 = *(u64 *)(r10 - 8);"
+
+ "r6 *= 4;"
+ "r1 = %[vals];"
+ /* here r6 is forced to be precise and has to be propagated
+ * back to the beginning, handling (and ignoring) subprog call
+ */
+ "r1 += r6;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals),
+ __imm_ptr(callback_subprog),
+ __imm(bpf_loop)
+ : __clobber_common, "r6"
+ );
+}
+
+__noinline __used
+static __u64 subprog_with_precise_arg(__u64 x)
+{
+ return vals[x]; /* x is forced to be precise */
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("8: (0f) r2 += r1")
+__msg("mark_precise: frame1: last_idx 8 first_idx 0")
+__msg("mark_precise: frame1: regs=r1 stack= before 6: (18) r2 = ")
+__msg("mark_precise: frame1: regs=r1 stack= before 5: (67) r1 <<= 2")
+__msg("mark_precise: frame1: regs=r1 stack= before 2: (85) call pc+2")
+__msg("mark_precise: frame0: regs=r1 stack= before 1: (bf) r1 = r6")
+__msg("mark_precise: frame0: regs=r6 stack= before 0: (b7) r6 = 3")
+__naked int subprog_arg_precise(void)
+{
+ asm volatile (
+ "r6 = 3;"
+ "r1 = r6;"
+ /* subprog_with_precise_arg expects its argument to be
+ * precise, so r1->r6 will be marked precise from inside the
+ * subprog
+ */
+ "call subprog_with_precise_arg;"
+ "r0 += r6;"
+ "exit;"
+ :
+ :
+ : __clobber_common, "r6"
+ );
+}
+
+/* r1 is pointer to stack slot;
+ * r2 is a register to spill into that slot
+ * subprog also spills r2 into its own stack slot
+ */
+__naked __noinline __used
+static __u64 subprog_spill_reg_precise(void)
+{
+ asm volatile (
+ /* spill to parent stack */
+ "*(u64 *)(r1 + 0) = r2;"
+ /* spill to subprog stack (we use -16 offset to avoid
+ * accidental confusion with parent's -8 stack slot in
+ * verifier log output)
+ */
+ "*(u64 *)(r10 - 16) = r2;"
+ /* use both spills as return result to propagate precision everywhere */
+ "r0 = *(u64 *)(r10 - 16);"
+ "r2 = *(u64 *)(r1 + 0);"
+ "r0 += r2;"
+ "exit;"
+ );
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+/* precision backtracking can't currently handle stack access not through r10,
+ * so we won't be able to mark stack slot fp-8 as precise, and so will
+ * fall back to forcing all scalars precise
+ */
+__msg("mark_precise: frame0: falling back to forcing all scalars precise")
+__naked int subprog_spill_into_parent_stack_slot_precise(void)
+{
+ asm volatile (
+ "r6 = 1;"
+
+ /* pass pointer to stack slot and r6 to subprog;
+ * r6 will be marked precise and spilled into fp-8 slot, which
+ * also should be marked precise
+ */
+ "r1 = r10;"
+ "r1 += -8;"
+ "r2 = r6;"
+ "call subprog_spill_reg_precise;"
+
+ /* restore reg from stack; in this case we'll be carrying
+ * stack mask when going back into subprog through jump
+ * history
+ */
+ "r7 = *(u64 *)(r10 - 8);"
+
+ "r7 *= 4;"
+ "r1 = %[vals];"
+ /* here r7 is forced to be precise and has to be propagated
+ * back to the beginning, handling subprog call and logic
+ */
+ "r1 += r7;"
+ "r0 = *(u32 *)(r1 + 0);"
+ "exit;"
+ :
+ : __imm_ptr(vals)
+ : __clobber_common, "r6", "r7"
+ );
+}
+
+__naked __noinline __used
+static __u64 subprog_with_checkpoint(void)
+{
+ asm volatile (
+ "r0 = 0;"
+ /* guaranteed checkpoint if BPF_F_TEST_STATE_FREQ is used */
+ "goto +0;"
+ "exit;"
+ );
+}
+
+char _license[] SEC("license") = "GPL";
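
Every __naked test above encodes a variation of one C-level pattern: an unknown scalar flows through a static subprog, a global subprog, a callback, or a spilled stack slot, and is then used as an array index, which forces the verifier to mark the entire producing chain precise. Roughly, with a hypothetical producer:

    int idx = produce_scalar();   /* subprog return, callback result, or restored spill */
    if (idx < ARRAY_SIZE(vals))
        return vals[idx];         /* index use forces idx, and its history, precise */
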
diff --git a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
index e1c787815e44..b2dfd7066c6e 100644
--- a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
+++ b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c
@@ -77,7 +77,9 @@ int rx(struct xdp_md *ctx)
}
err = bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp);
- if (err)
+ if (!err)
+ meta->xdp_timestamp = bpf_ktime_get_tai_ns();
+ else
meta->rx_timestamp = 0; /* Used by AF_XDP as not avail signal */
err = bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash, &meta->rx_hash_type);
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index ea82921110da..793689dcc170 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -714,7 +714,13 @@ static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];
const char *argp_program_version = "test_progs 0.1";
const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
-static const char argp_program_doc[] = "BPF selftests test runner";
+static const char argp_program_doc[] =
+"BPF selftests test runner\v"
+"Options accepting the NAMES parameter take either a comma-separated list\n"
+"of test names, or a filename prefixed with @. The file contains one name\n"
+"(or wildcard pattern) per line, and comments beginning with # are ignored.\n"
+"\n"
+"These options can be passed repeatedly to read multiple files.\n";
enum ARG_KEYS {
ARG_TEST_NUM = 'n',
@@ -797,6 +803,7 @@ extern int extra_prog_load_log_flags;
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
struct test_env *env = state->input;
+ int err = 0;
switch (key) {
case ARG_TEST_NUM: {
@@ -821,18 +828,28 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
}
case ARG_TEST_NAME_GLOB_ALLOWLIST:
case ARG_TEST_NAME: {
- if (parse_test_list(arg,
- &env->test_selector.whitelist,
- key == ARG_TEST_NAME_GLOB_ALLOWLIST))
- return -ENOMEM;
+ if (arg[0] == '@')
+ err = parse_test_list_file(arg + 1,
+ &env->test_selector.whitelist,
+ key == ARG_TEST_NAME_GLOB_ALLOWLIST);
+ else
+ err = parse_test_list(arg,
+ &env->test_selector.whitelist,
+ key == ARG_TEST_NAME_GLOB_ALLOWLIST);
+
break;
}
case ARG_TEST_NAME_GLOB_DENYLIST:
case ARG_TEST_NAME_BLACKLIST: {
- if (parse_test_list(arg,
- &env->test_selector.blacklist,
- key == ARG_TEST_NAME_GLOB_DENYLIST))
- return -ENOMEM;
+ if (arg[0] == '@')
+ err = parse_test_list_file(arg + 1,
+ &env->test_selector.blacklist,
+ key == ARG_TEST_NAME_GLOB_DENYLIST);
+ else
+ err = parse_test_list(arg,
+ &env->test_selector.blacklist,
+ key == ARG_TEST_NAME_GLOB_DENYLIST);
+
break;
}
case ARG_VERIFIER_STATS:
@@ -900,7 +917,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
default:
return ARGP_ERR_UNKNOWN;
}
- return 0;
+ return err;
}
/*
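
With this change, any NAMES argument can be a file reference, e.g. -a @allow.txt with the glob-allowlist option. A hypothetical allow.txt in the format the help text describes:

    # one test name or wildcard pattern per line; '#' starts a comment
    dynptr/*
    verifier_subprog_precision
    iters/iter_pass_iter_ptr_to_subprog
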
diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
index 0b5e0829e5be..dc9595ade8de 100644
--- a/tools/testing/selftests/bpf/testing_helpers.c
+++ b/tools/testing/selftests/bpf/testing_helpers.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */
/* Copyright (C) 2020 Facebook, Inc. */
+#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
@@ -70,92 +71,168 @@ int parse_num_list(const char *s, bool **num_set, int *num_set_len)
return 0;
}
-int parse_test_list(const char *s,
- struct test_filter_set *set,
- bool is_glob_pattern)
+static int do_insert_test(struct test_filter_set *set,
+ char *test_str,
+ char *subtest_str)
{
- char *input, *state = NULL, *next;
- struct test_filter *tmp, *tests = NULL;
- int i, j, cnt = 0;
+ struct test_filter *tmp, *test;
+ char **ctmp;
+ int i;
- input = strdup(s);
- if (!input)
+ for (i = 0; i < set->cnt; i++) {
+ test = &set->tests[i];
+
+ if (strcmp(test_str, test->name) == 0) {
+ free(test_str);
+ goto subtest;
+ }
+ }
+
+ tmp = realloc(set->tests, sizeof(*test) * (set->cnt + 1));
+ if (!tmp)
return -ENOMEM;
- while ((next = strtok_r(state ? NULL : input, ",", &state))) {
- char *subtest_str = strchr(next, '/');
- char *pattern = NULL;
- int glob_chars = 0;
+ set->tests = tmp;
+ test = &set->tests[set->cnt];
- tmp = realloc(tests, sizeof(*tests) * (cnt + 1));
- if (!tmp)
- goto err;
- tests = tmp;
+ test->name = test_str;
+ test->subtests = NULL;
+ test->subtest_cnt = 0;
- tests[cnt].subtest_cnt = 0;
- tests[cnt].subtests = NULL;
+ set->cnt++;
- if (is_glob_pattern) {
- pattern = "%s";
- } else {
- pattern = "*%s*";
- glob_chars = 2;
- }
+subtest:
+ if (!subtest_str)
+ return 0;
- if (subtest_str) {
- char **tmp_subtests = NULL;
- int subtest_cnt = tests[cnt].subtest_cnt;
-
- *subtest_str = '\0';
- subtest_str += 1;
- tmp_subtests = realloc(tests[cnt].subtests,
- sizeof(*tmp_subtests) *
- (subtest_cnt + 1));
- if (!tmp_subtests)
- goto err;
- tests[cnt].subtests = tmp_subtests;
-
- tests[cnt].subtests[subtest_cnt] =
- malloc(strlen(subtest_str) + glob_chars + 1);
- if (!tests[cnt].subtests[subtest_cnt])
- goto err;
- sprintf(tests[cnt].subtests[subtest_cnt],
- pattern,
- subtest_str);
-
- tests[cnt].subtest_cnt++;
+ for (i = 0; i < test->subtest_cnt; i++) {
+ if (strcmp(subtest_str, test->subtests[i]) == 0) {
+ free(subtest_str);
+ return 0;
}
+ }
- tests[cnt].name = malloc(strlen(next) + glob_chars + 1);
- if (!tests[cnt].name)
- goto err;
- sprintf(tests[cnt].name, pattern, next);
+ ctmp = realloc(test->subtests,
+ sizeof(*test->subtests) * (test->subtest_cnt + 1));
+ if (!ctmp)
+ return -ENOMEM;
- cnt++;
+ test->subtests = ctmp;
+ test->subtests[test->subtest_cnt] = subtest_str;
+
+ test->subtest_cnt++;
+
+ return 0;
+}
+
+static int insert_test(struct test_filter_set *set,
+ char *test_spec,
+ bool is_glob_pattern)
+{
+ char *pattern, *subtest_str, *ext_test_str, *ext_subtest_str = NULL;
+ int glob_chars = 0;
+
+ if (is_glob_pattern) {
+ pattern = "%s";
+ } else {
+ pattern = "*%s*";
+ glob_chars = 2;
}
- tmp = realloc(set->tests, sizeof(*tests) * (cnt + set->cnt));
- if (!tmp)
+ subtest_str = strchr(test_spec, '/');
+ if (subtest_str) {
+ *subtest_str = '\0';
+ subtest_str += 1;
+ }
+
+ ext_test_str = malloc(strlen(test_spec) + glob_chars + 1);
+ if (!ext_test_str)
goto err;
- memcpy(tmp + set->cnt, tests, sizeof(*tests) * cnt);
- set->tests = tmp;
- set->cnt += cnt;
+ sprintf(ext_test_str, pattern, test_spec);
- free(tests);
- free(input);
- return 0;
+ if (subtest_str) {
+ ext_subtest_str = malloc(strlen(subtest_str) + glob_chars + 1);
+ if (!ext_subtest_str)
+ goto err;
+
+ sprintf(ext_subtest_str, pattern, subtest_str);
+ }
+
+ return do_insert_test(set, ext_test_str, ext_subtest_str);
err:
- for (i = 0; i < cnt; i++) {
- for (j = 0; j < tests[i].subtest_cnt; j++)
- free(tests[i].subtests[j]);
+ free(ext_test_str);
+ free(ext_subtest_str);
- free(tests[i].name);
+ return -ENOMEM;
+}
+
+int parse_test_list_file(const char *path,
+ struct test_filter_set *set,
+ bool is_glob_pattern)
+{
+ char *buf = NULL, *capture_start, *capture_end, *scan_end;
+ size_t buflen = 0;
+ int err = 0;
+ FILE *f;
+
+ f = fopen(path, "r");
+ if (!f) {
+ err = -errno;
+ fprintf(stderr, "Failed to open '%s': %d\n", path, err);
+ return err;
+ }
+
+ while (getline(&buf, &buflen, f) != -1) {
+ capture_start = buf;
+
+ while (isspace(*capture_start))
+ ++capture_start;
+
+ capture_end = capture_start;
+ scan_end = capture_start;
+
+ while (*scan_end && *scan_end != '#') {
+ if (!isspace(*scan_end))
+ capture_end = scan_end;
+
+ ++scan_end;
+ }
+
+ if (capture_end == capture_start)
+ continue;
+
+ *(++capture_end) = '\0';
+
+ err = insert_test(set, capture_start, is_glob_pattern);
+ if (err)
+ break;
+ }
+
+ fclose(f);
+ return err;
+}
+
+int parse_test_list(const char *s,
+ struct test_filter_set *set,
+ bool is_glob_pattern)
+{
+ char *input, *state = NULL, *test_spec;
+ int err = 0;
+
+ input = strdup(s);
+ if (!input)
+ return -ENOMEM;
+
+ while ((test_spec = strtok_r(state ? NULL : input, ",", &state))) {
+ err = insert_test(set, test_spec, is_glob_pattern);
+ if (err)
+ break;
}
- free(tests);
+
free(input);
- return -ENOMEM;
+ return err;
}
__u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info)
diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h
index eb8790f928e4..98f09bbae86f 100644
--- a/tools/testing/selftests/bpf/testing_helpers.h
+++ b/tools/testing/selftests/bpf/testing_helpers.h
@@ -20,5 +20,8 @@ struct test_filter_set;
int parse_test_list(const char *s,
struct test_filter_set *test_set,
bool is_glob_pattern);
+int parse_test_list_file(const char *path,
+ struct test_filter_set *test_set,
+ bool is_glob_pattern);
__u64 read_perf_max_sample_freq(void);
diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c
index 6c03a7d805f9..b8c0aae8e7ec 100644
--- a/tools/testing/selftests/bpf/verifier/precise.c
+++ b/tools/testing/selftests/bpf/verifier/precise.c
@@ -38,25 +38,24 @@
.fixup_map_array_48b = { 1 },
.result = VERBOSE_ACCEPT,
.errstr =
- "26: (85) call bpf_probe_read_kernel#113\
- last_idx 26 first_idx 20\
- regs=4 stack=0 before 25\
- regs=4 stack=0 before 24\
- regs=4 stack=0 before 23\
- regs=4 stack=0 before 22\
- regs=4 stack=0 before 20\
- parent didn't have regs=4 stack=0 marks\
- last_idx 19 first_idx 10\
- regs=4 stack=0 before 19\
- regs=200 stack=0 before 18\
- regs=300 stack=0 before 17\
- regs=201 stack=0 before 15\
- regs=201 stack=0 before 14\
- regs=200 stack=0 before 13\
- regs=200 stack=0 before 12\
- regs=200 stack=0 before 11\
- regs=200 stack=0 before 10\
- parent already had regs=0 stack=0 marks",
+ "mark_precise: frame0: last_idx 26 first_idx 20\
+ mark_precise: frame0: regs=r2 stack= before 25\
+ mark_precise: frame0: regs=r2 stack= before 24\
+ mark_precise: frame0: regs=r2 stack= before 23\
+ mark_precise: frame0: regs=r2 stack= before 22\
+ mark_precise: frame0: regs=r2 stack= before 20\
+ mark_precise: frame0: parent state regs=r2 stack=:\
+ mark_precise: frame0: last_idx 19 first_idx 10\
+ mark_precise: frame0: regs=r2 stack= before 19\
+ mark_precise: frame0: regs=r9 stack= before 18\
+ mark_precise: frame0: regs=r8,r9 stack= before 17\
+ mark_precise: frame0: regs=r0,r9 stack= before 15\
+ mark_precise: frame0: regs=r0,r9 stack= before 14\
+ mark_precise: frame0: regs=r9 stack= before 13\
+ mark_precise: frame0: regs=r9 stack= before 12\
+ mark_precise: frame0: regs=r9 stack= before 11\
+ mark_precise: frame0: regs=r9 stack= before 10\
+ mark_precise: frame0: parent state regs= stack=:",
},
{
"precise: test 2",
@@ -100,20 +99,20 @@
.flags = BPF_F_TEST_STATE_FREQ,
.errstr =
"26: (85) call bpf_probe_read_kernel#113\
- last_idx 26 first_idx 22\
- regs=4 stack=0 before 25\
- regs=4 stack=0 before 24\
- regs=4 stack=0 before 23\
- regs=4 stack=0 before 22\
- parent didn't have regs=4 stack=0 marks\
- last_idx 20 first_idx 20\
- regs=4 stack=0 before 20\
- parent didn't have regs=4 stack=0 marks\
- last_idx 19 first_idx 17\
- regs=4 stack=0 before 19\
- regs=200 stack=0 before 18\
- regs=300 stack=0 before 17\
- parent already had regs=0 stack=0 marks",
+ mark_precise: frame0: last_idx 26 first_idx 22\
+ mark_precise: frame0: regs=r2 stack= before 25\
+ mark_precise: frame0: regs=r2 stack= before 24\
+ mark_precise: frame0: regs=r2 stack= before 23\
+ mark_precise: frame0: regs=r2 stack= before 22\
+ mark_precise: frame0: parent state regs=r2 stack=:\
+ mark_precise: frame0: last_idx 20 first_idx 20\
+ mark_precise: frame0: regs=r2 stack= before 20\
+ mark_precise: frame0: parent state regs=r2 stack=:\
+ mark_precise: frame0: last_idx 19 first_idx 17\
+ mark_precise: frame0: regs=r2 stack= before 19\
+ mark_precise: frame0: regs=r9 stack= before 18\
+ mark_precise: frame0: regs=r8,r9 stack= before 17\
+ mark_precise: frame0: parent state regs= stack=:",
},
{
"precise: cross frame pruning",
@@ -153,15 +152,16 @@
},
.prog_type = BPF_PROG_TYPE_XDP,
.flags = BPF_F_TEST_STATE_FREQ,
- .errstr = "5: (2d) if r4 > r0 goto pc+0\
- last_idx 5 first_idx 5\
- parent didn't have regs=10 stack=0 marks\
- last_idx 4 first_idx 2\
- regs=10 stack=0 before 4\
- regs=10 stack=0 before 3\
- regs=0 stack=1 before 2\
- last_idx 5 first_idx 5\
- parent didn't have regs=1 stack=0 marks",
+ .errstr = "mark_precise: frame0: last_idx 5 first_idx 5\
+ mark_precise: frame0: parent state regs=r4 stack=:\
+ mark_precise: frame0: last_idx 4 first_idx 2\
+ mark_precise: frame0: regs=r4 stack= before 4\
+ mark_precise: frame0: regs=r4 stack= before 3\
+ mark_precise: frame0: regs= stack=-8 before 2\
+ mark_precise: frame0: falling back to forcing all scalars precise\
+ force_precise: frame0: forcing r0 to be precise\
+ mark_precise: frame0: last_idx 5 first_idx 5\
+ mark_precise: frame0: parent state regs= stack=:",
.result = VERBOSE_ACCEPT,
.retval = -1,
},
@@ -179,16 +179,19 @@
},
.prog_type = BPF_PROG_TYPE_XDP,
.flags = BPF_F_TEST_STATE_FREQ,
- .errstr = "last_idx 6 first_idx 6\
- parent didn't have regs=10 stack=0 marks\
- last_idx 5 first_idx 3\
- regs=10 stack=0 before 5\
- regs=10 stack=0 before 4\
- regs=0 stack=1 before 3\
- last_idx 6 first_idx 6\
- parent didn't have regs=1 stack=0 marks\
- last_idx 5 first_idx 3\
- regs=1 stack=0 before 5",
+ .errstr = "mark_precise: frame0: last_idx 6 first_idx 6\
+ mark_precise: frame0: parent state regs=r4 stack=:\
+ mark_precise: frame0: last_idx 5 first_idx 3\
+ mark_precise: frame0: regs=r4 stack= before 5\
+ mark_precise: frame0: regs=r4 stack= before 4\
+ mark_precise: frame0: regs= stack=-8 before 3\
+ mark_precise: frame0: falling back to forcing all scalars precise\
+ force_precise: frame0: forcing r0 to be precise\
+ force_precise: frame0: forcing r0 to be precise\
+ force_precise: frame0: forcing r0 to be precise\
+ force_precise: frame0: forcing r0 to be precise\
+ mark_precise: frame0: last_idx 6 first_idx 6\
+ mark_precise: frame0: parent state regs= stack=:",
.result = VERBOSE_ACCEPT,
.retval = -1,
},
@@ -217,3 +220,39 @@
.errstr = "invalid access to memory, mem_size=1 off=42 size=8",
.result = REJECT,
},
+{
+ "precise: program doesn't prematurely prune branches",
+ .insns = {
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0x400),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_8, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0x80000000),
+ BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 0x401),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 2),
+ BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 1),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 1),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0),
+ BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4),
+ BPF_LD_MAP_FD(BPF_REG_4, 0),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 10),
+ BPF_ALU64_IMM(BPF_MUL, BPF_REG_6, 8192),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_3, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 13 },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = REJECT,
+ .errstr = "register with unbounded min value is not allowed",
+},
diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c
index 1db7185181da..655095810d4a 100644
--- a/tools/testing/selftests/bpf/veristat.c
+++ b/tools/testing/selftests/bpf/veristat.c
@@ -141,6 +141,7 @@ static struct env {
bool verbose;
bool debug;
bool quiet;
+ bool force_checkpoints;
enum resfmt out_fmt;
bool show_version;
bool comparison_mode;
@@ -209,6 +210,8 @@ static const struct argp_option opts[] = {
{ "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" },
{ "log-fixed", OPT_LOG_FIXED, NULL, 0, "Disable verifier log rotation" },
{ "log-size", OPT_LOG_SIZE, "BYTES", 0, "Customize verifier log size (default to 16MB)" },
+ { "test-states", 't', NULL, 0,
+ "Force frequent BPF verifier state checkpointing (set BPF_F_TEST_STATE_FREQ program flag)" },
{ "quiet", 'q', NULL, 0, "Quiet mode" },
{ "emit", 'e', "SPEC", 0, "Specify stats to be emitted" },
{ "sort", 's', "SPEC", 0, "Specify sort order" },
@@ -284,6 +287,9 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
argp_usage(state);
}
break;
+ case 't':
+ env.force_checkpoints = true;
+ break;
case 'C':
env.comparison_mode = true;
break;
@@ -989,6 +995,9 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf
/* increase chances of successful BPF object loading */
fixup_obj(obj, prog, base_filename);
+ if (env.force_checkpoints)
+ bpf_program__set_flags(prog, bpf_program__flags(prog) | BPF_F_TEST_STATE_FREQ);
+
err = bpf_object__load(obj);
env.progs_processed++;
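
The new flag maps one-to-one onto BPF_F_TEST_STATE_FREQ, so a run such as (object name invented for illustration):

    ./veristat --test-states some_prog.bpf.o

makes the verifier checkpoint state far more often, exercising the pruning and precision-tracking paths that several of the selftests above assert on.
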
diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c
index 987cf0db5ebc..613321eb84c1 100644
--- a/tools/testing/selftests/bpf/xdp_hw_metadata.c
+++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c
@@ -27,6 +27,7 @@
#include <sys/mman.h>
#include <net/if.h>
#include <poll.h>
+#include <time.h>
#include "xdp_metadata.h"
@@ -134,18 +135,52 @@ static void refill_rx(struct xsk *xsk, __u64 addr)
}
}
-static void verify_xdp_metadata(void *data)
+#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
+static __u64 gettime(clockid_t clock_id)
+{
+ struct timespec t;
+ int res;
+
+ /* See man clock_gettime(2) for type of clock_id's */
+ res = clock_gettime(clock_id, &t);
+
+ if (res < 0)
+ error(res, errno, "Error with clock_gettime()");
+
+ return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
+}
+
+static void verify_xdp_metadata(void *data, clockid_t clock_id)
{
struct xdp_meta *meta;
meta = data - sizeof(*meta);
- printf("rx_timestamp: %llu\n", meta->rx_timestamp);
if (meta->rx_hash_err < 0)
printf("No rx_hash err=%d\n", meta->rx_hash_err);
else
printf("rx_hash: 0x%X with RSS type:0x%X\n",
meta->rx_hash, meta->rx_hash_type);
+
+ printf("rx_timestamp: %llu (sec:%0.4f)\n", meta->rx_timestamp,
+ (double)meta->rx_timestamp / NANOSEC_PER_SEC);
+ if (meta->rx_timestamp) {
+ __u64 usr_clock = gettime(clock_id);
+ __u64 xdp_clock = meta->xdp_timestamp;
+ __s64 delta_X = xdp_clock - meta->rx_timestamp;
+ __s64 delta_X2U = usr_clock - xdp_clock;
+
+ printf("XDP RX-time: %llu (sec:%0.4f) delta sec:%0.4f (%0.3f usec)\n",
+ xdp_clock, (double)xdp_clock / NANOSEC_PER_SEC,
+ (double)delta_X / NANOSEC_PER_SEC,
+ (double)delta_X / 1000);
+
+ printf("AF_XDP time: %llu (sec:%0.4f) delta sec:%0.4f (%0.3f usec)\n",
+ usr_clock, (double)usr_clock / NANOSEC_PER_SEC,
+ (double)delta_X2U / NANOSEC_PER_SEC,
+ (double)delta_X2U / 1000);
+ }
+
}
static void verify_skb_metadata(int fd)
@@ -193,7 +228,7 @@ static void verify_skb_metadata(int fd)
printf("skb hwtstamp is not found!\n");
}
-static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd)
+static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t clock_id)
{
const struct xdp_desc *rx_desc;
struct pollfd fds[rxq + 1];
@@ -243,7 +278,8 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd)
addr = xsk_umem__add_offset_to_addr(rx_desc->addr);
printf("%p: rx_desc[%u]->addr=%llx addr=%llx comp_addr=%llx\n",
xsk, idx, rx_desc->addr, addr, comp_addr);
- verify_xdp_metadata(xsk_umem__get_data(xsk->umem_area, addr));
+ verify_xdp_metadata(xsk_umem__get_data(xsk->umem_area, addr),
+ clock_id);
xsk_ring_cons__release(&xsk->rx, 1);
refill_rx(xsk, comp_addr);
}
@@ -370,6 +406,7 @@ static void timestamping_enable(int fd, int val)
int main(int argc, char *argv[])
{
+ clockid_t clock_id = CLOCK_TAI;
int server_fd = -1;
int ret;
int i;
@@ -443,7 +480,7 @@ int main(int argc, char *argv[])
error(1, -ret, "bpf_xdp_attach");
signal(SIGINT, handle_signal);
- ret = verify_metadata(rx_xsk, rxq, server_fd);
+ ret = verify_metadata(rx_xsk, rxq, server_fd, clock_id);
close(server_fd);
cleanup();
if (ret)
diff --git a/tools/testing/selftests/bpf/xdp_metadata.h b/tools/testing/selftests/bpf/xdp_metadata.h
index 0c4624dc6f2f..938a729bd307 100644
--- a/tools/testing/selftests/bpf/xdp_metadata.h
+++ b/tools/testing/selftests/bpf/xdp_metadata.h
@@ -11,6 +11,7 @@
struct xdp_meta {
__u64 rx_timestamp;
+ __u64 xdp_timestamp;
__u32 rx_hash;
union {
__u32 rx_hash_type;
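
Taken together, these hunks let xdp_hw_metadata compare three timestamps on the same clock, CLOCK_TAI, assuming the NIC's hardware timestamps are TAI-based as the test expects:

    /* rx_timestamp  - taken by the NIC at packet reception          */
    /* xdp_timestamp - bpf_ktime_get_tai_ns() inside the XDP program */
    /* usr_clock     - clock_gettime(CLOCK_TAI) in userspace         */

    __s64 delta_X   = xdp_clock - meta->rx_timestamp; /* NIC -> XDP latency    */
    __s64 delta_X2U = usr_clock - xdp_clock;          /* XDP -> AF_XDP latency */
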
diff --git a/tools/testing/selftests/cachestat/.gitignore b/tools/testing/selftests/cachestat/.gitignore
new file mode 100644
index 000000000000..d6c30b43a4bb
--- /dev/null
+++ b/tools/testing/selftests/cachestat/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+test_cachestat
diff --git a/tools/testing/selftests/cachestat/Makefile b/tools/testing/selftests/cachestat/Makefile
new file mode 100644
index 000000000000..fca73aaa7d14
--- /dev/null
+++ b/tools/testing/selftests/cachestat/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+TEST_GEN_PROGS := test_cachestat
+
+CFLAGS += $(KHDR_INCLUDES)
+CFLAGS += -Wall
+CFLAGS += -lrt
+
+include ../lib.mk
diff --git a/tools/testing/selftests/cachestat/test_cachestat.c b/tools/testing/selftests/cachestat/test_cachestat.c
new file mode 100644
index 000000000000..9be2262e5c17
--- /dev/null
+++ b/tools/testing/selftests/cachestat/test_cachestat.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <linux/kernel.h>
+#include <linux/mman.h>
+#include <sys/mman.h>
+#include <sys/shm.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <string.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include "../kselftest.h"
+
+static const char * const dev_files[] = {
+ "/dev/zero", "/dev/null", "/dev/urandom",
+ "/proc/version", "/proc"
+};
+static const int cachestat_nr = 451;
+
+void print_cachestat(struct cachestat *cs)
+{
+ ksft_print_msg(
+ "Using cachestat: Cached: %lu, Dirty: %lu, Writeback: %lu, Evicted: %lu, Recently Evicted: %lu\n",
+ cs->nr_cache, cs->nr_dirty, cs->nr_writeback,
+ cs->nr_evicted, cs->nr_recently_evicted);
+}
+
+bool write_exactly(int fd, size_t filesize)
+{
+ char data[filesize];
+ bool ret = true;
+ int random_fd = open("/dev/urandom", O_RDONLY);
+
+ if (random_fd < 0) {
+ ksft_print_msg("Unable to access urandom.\n");
+ ret = false;
+ goto out;
+ } else {
+ int remained = filesize;
+ char *cursor = data;
+
+ while (remained) {
+ ssize_t read_len = read(random_fd, cursor, remained);
+
+ if (read_len <= 0) {
+ ksft_print_msg("Unable to read from urandom.\n");
+ ret = false;
+ goto close_random_fd;
+ }
+
+ remained -= read_len;
+ cursor += read_len;
+ }
+
+ /* write random data to fd */
+ remained = filesize;
+ cursor = data;
+ while (remained) {
+ ssize_t write_len = write(fd, cursor, remained);
+
+ if (write_len <= 0) {
+ ksft_print_msg("Unable write random data to file.\n");
+ ret = false;
+ goto close_random_fd;
+ }
+
+ remained -= write_len;
+ cursor += write_len;
+ }
+ }
+
+close_random_fd:
+ close(random_fd);
+out:
+ return ret;
+}
+
+/*
+ * Open/create the file at filename, (optionally) write random data to it
+ * (exactly num_pages), then test the cachestat syscall on this file.
+ *
+ * If test_fsync == true, fsync the file, then check the number of dirty
+ * pages.
+ */
+bool test_cachestat(const char *filename, bool write_random, bool create,
+ bool test_fsync, unsigned long num_pages, int open_flags,
+ mode_t open_mode)
+{
+ size_t PS = sysconf(_SC_PAGESIZE);
+ int filesize = num_pages * PS;
+ bool ret = true;
+ long syscall_ret;
+ struct cachestat cs;
+ struct cachestat_range cs_range = { 0, filesize };
+
+ int fd = open(filename, open_flags, open_mode);
+
+ if (fd == -1) {
+ ksft_print_msg("Unable to create/open file.\n");
+ ret = false;
+ goto out;
+ } else {
+ ksft_print_msg("Create/open %s\n", filename);
+ }
+
+ if (write_random) {
+ if (!write_exactly(fd, filesize)) {
+ ksft_print_msg("Unable to access urandom.\n");
+ ret = false;
+ goto out1;
+ }
+ }
+
+ syscall_ret = syscall(cachestat_nr, fd, &cs_range, &cs, 0);
+
+ ksft_print_msg("Cachestat call returned %ld\n", syscall_ret);
+
+ if (syscall_ret) {
+ ksft_print_msg("Cachestat returned non-zero.\n");
+ ret = false;
+ goto out1;
+
+ } else {
+ print_cachestat(&cs);
+
+ if (write_random) {
+ if (cs.nr_cache + cs.nr_evicted != num_pages) {
+ ksft_print_msg(
+ "Total number of cached and evicted pages is off.\n");
+ ret = false;
+ }
+ }
+ }
+
+ if (test_fsync) {
+ if (fsync(fd)) {
+ ksft_print_msg("fsync fails.\n");
+ ret = false;
+ } else {
+ syscall_ret = syscall(cachestat_nr, fd, &cs_range, &cs, 0);
+
+ ksft_print_msg("Cachestat call (after fsync) returned %ld\n",
+ syscall_ret);
+
+ if (!syscall_ret) {
+ print_cachestat(&cs);
+
+ if (cs.nr_dirty) {
+ ret = false;
+ ksft_print_msg(
+ "Number of dirty should be zero after fsync.\n");
+ }
+ } else {
+ ksft_print_msg("Cachestat (after fsync) returned non-zero.\n");
+ ret = false;
+ goto out1;
+ }
+ }
+ }
+
+out1:
+ close(fd);
+
+ if (create)
+ remove(filename);
+out:
+ return ret;
+}
+
+bool test_cachestat_shmem(void)
+{
+ size_t PS = sysconf(_SC_PAGESIZE);
+ size_t filesize = PS * 512 * 2; /* 2 2MB huge pages */
+ int syscall_ret;
+ size_t compute_len = PS * 512;
+ struct cachestat_range cs_range = { PS, compute_len };
+ char *filename = "tmpshmcstat";
+ struct cachestat cs;
+ bool ret = true;
+ unsigned long num_pages = compute_len / PS;
+ int fd = shm_open(filename, O_CREAT | O_RDWR, 0600);
+
+ if (fd < 0) {
+ ksft_print_msg("Unable to create shmem file.\n");
+ ret = false;
+ goto out;
+ }
+
+ if (ftruncate(fd, filesize)) {
+ ksft_print_msg("Unable to truncate shmem file.\n");
+ ret = false;
+ goto close_fd;
+ }
+
+ if (!write_exactly(fd, filesize)) {
+ ksft_print_msg("Unable to write to shmem file.\n");
+ ret = false;
+ goto close_fd;
+ }
+
+ syscall_ret = syscall(cachestat_nr, fd, &cs_range, &cs, 0);
+
+ if (syscall_ret) {
+ ksft_print_msg("Cachestat returned non-zero.\n");
+ ret = false;
+ goto close_fd;
+ } else {
+ print_cachestat(&cs);
+ if (cs.nr_cache + cs.nr_evicted != num_pages) {
+ ksft_print_msg(
+ "Total number of cached and evicted pages is off.\n");
+ ret = false;
+ }
+ }
+
+close_fd:
+ shm_unlink(filename);
+out:
+ return ret;
+}
+
+int main(void)
+{
+ int ret = 0;
+
+ for (int i = 0; i < 5; i++) {
+ const char *dev_filename = dev_files[i];
+
+ if (test_cachestat(dev_filename, false, false, false,
+ 4, O_RDONLY, 0400))
+ ksft_test_result_pass("cachestat works with %s\n", dev_filename);
+ else {
+ ksft_test_result_fail("cachestat fails with %s\n", dev_filename);
+ ret = 1;
+ }
+ }
+
+ if (test_cachestat("tmpfilecachestat", true, true,
+ true, 4, O_CREAT | O_RDWR, 0400 | 0600))
+ ksft_test_result_pass("cachestat works with a normal file\n");
+ else {
+ ksft_test_result_fail("cachestat fails with normal file\n");
+ ret = 1;
+ }
+
+ if (test_cachestat_shmem())
+ ksft_test_result_pass("cachestat works with a shmem file\n");
+ else {
+ ksft_test_result_fail("cachestat fails with a shmem file\n");
+ ret = 1;
+ }
+
+ return ret;
+}
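
Since libc has no cachestat() wrapper yet, the test invokes the syscall directly; the minimal call shape used throughout is:

    struct cachestat_range range = { 0, filesize };   /* byte range to inspect */
    struct cachestat cs;
    long ret = syscall(451 /* cachestat_nr */, fd, &range, &cs, 0);
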
diff --git a/tools/testing/selftests/clone3/clone3.c b/tools/testing/selftests/clone3/clone3.c
index e495f895a2cd..e60cf4da8fb0 100644
--- a/tools/testing/selftests/clone3/clone3.c
+++ b/tools/testing/selftests/clone3/clone3.c
@@ -129,7 +129,7 @@ int main(int argc, char *argv[])
uid_t uid = getuid();
ksft_print_header();
- ksft_set_plan(18);
+ ksft_set_plan(19);
test_clone3_supported();
/* Just a simple clone3() should return 0.*/
@@ -198,5 +198,8 @@ int main(int argc, char *argv[])
/* Do a clone3() in a new time namespace */
test_clone3(CLONE_NEWTIME, 0, 0, CLONE3_ARGS_NO_TEST);
+ /* Do a clone3() with exit signal (SIGCHLD) in flags */
+ test_clone3(SIGCHLD, 0, -EINVAL, CLONE3_ARGS_NO_TEST);
+
ksft_finished();
}
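
The new case verifies that clone3() rejects an exit signal smuggled into ->flags: unlike legacy clone(), clone3() carries the exit signal in a dedicated struct field, so CSIGNAL bits in flags yield -EINVAL. Sketched against the uapi struct clone_args:

    struct clone_args args = {
        .flags       = SIGCHLD,   /* rejected with -EINVAL: not a CLONE_* flag */
        .exit_signal = SIGCHLD,   /* the supported way to request SIGCHLD      */
    };
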
diff --git a/tools/testing/selftests/ftrace/Makefile b/tools/testing/selftests/ftrace/Makefile
index d6e106fbce11..a1e955d2de4c 100644
--- a/tools/testing/selftests/ftrace/Makefile
+++ b/tools/testing/selftests/ftrace/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
all:
-TEST_PROGS := ftracetest
+TEST_PROGS_EXTENDED := ftracetest
+TEST_PROGS := ftracetest-ktap
TEST_FILES := test.d settings
EXTRA_CLEAN := $(OUTPUT)/logs/*
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
index c3311c8c4089..2506621e75df 100755
--- a/tools/testing/selftests/ftrace/ftracetest
+++ b/tools/testing/selftests/ftrace/ftracetest
@@ -13,6 +13,7 @@ echo "Usage: ftracetest [options] [testcase(s)] [testcase-directory(s)]"
echo " Options:"
echo " -h|--help Show help message"
echo " -k|--keep Keep passed test logs"
+echo " -K|--ktap Output in KTAP format"
echo " -v|--verbose Increase verbosity of test messages"
echo " -vv Alias of -v -v (Show all results in stdout)"
echo " -vvv Alias of -v -v -v (Show all commands immediately)"
@@ -85,6 +86,10 @@ parse_opts() { # opts
KEEP_LOG=1
shift 1
;;
+ --ktap|-K)
+ KTAP=1
+ shift 1
+ ;;
--verbose|-v|-vv|-vvv)
if [ $VERBOSE -eq -1 ]; then
usage "--console can not use with --verbose"
@@ -178,6 +183,7 @@ TEST_DIR=$TOP_DIR/test.d
TEST_CASES=`find_testcases $TEST_DIR`
LOG_DIR=$TOP_DIR/logs/`date +%Y%m%d-%H%M%S`/
KEEP_LOG=0
+KTAP=0
DEBUG=0
VERBOSE=0
UNSUPPORTED_RESULT=0
@@ -229,7 +235,7 @@ prlog() { # messages
newline=
shift
fi
- printf "$*$newline"
+ [ "$KTAP" != "1" ] && printf "$*$newline"
[ "$LOG_FILE" ] && printf "$*$newline" | strip_esc >> $LOG_FILE
}
catlog() { #file
@@ -260,11 +266,11 @@ TOTAL_RESULT=0
INSTANCE=
CASENO=0
+CASENAME=
testcase() { # testfile
CASENO=$((CASENO+1))
- desc=`grep "^#[ \t]*description:" $1 | cut -f2- -d:`
- prlog -n "[$CASENO]$INSTANCE$desc"
+ CASENAME=`grep "^#[ \t]*description:" $1 | cut -f2- -d:`
}
checkreq() { # testfile
@@ -277,40 +283,68 @@ test_on_instance() { # testfile
grep -q "^#[ \t]*flags:.*instance" $1
}
+ktaptest() { # result comment
+ if [ "$KTAP" != "1" ]; then
+ return
+ fi
+
+ local result=
+ if [ "$1" = "1" ]; then
+ result="ok"
+ else
+ result="not ok"
+ fi
+ shift
+
+ local comment=$*
+ if [ "$comment" != "" ]; then
+ comment="# $comment"
+ fi
+
+ echo $result $CASENO $INSTANCE$CASENAME $comment
+}
+
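+# For illustration: with -K, each case is reported as one KTAP result line
+# produced by ktaptest() above, e.g.
+#   ok 1 Basic trace file check
+#   not ok 2 <description> # UNRESOLVED
+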
eval_result() { # sigval
case $1 in
$PASS)
prlog " [${color_green}PASS${color_reset}]"
+ ktaptest 1
PASSED_CASES="$PASSED_CASES $CASENO"
return 0
;;
$FAIL)
prlog " [${color_red}FAIL${color_reset}]"
+ ktaptest 0
FAILED_CASES="$FAILED_CASES $CASENO"
return 1 # this is a bug.
;;
$UNRESOLVED)
prlog " [${color_blue}UNRESOLVED${color_reset}]"
+ ktaptest 0 UNRESOLVED
UNRESOLVED_CASES="$UNRESOLVED_CASES $CASENO"
return $UNRESOLVED_RESULT # depends on use case
;;
$UNTESTED)
prlog " [${color_blue}UNTESTED${color_reset}]"
+ ktaptest 1 SKIP
UNTESTED_CASES="$UNTESTED_CASES $CASENO"
return 0
;;
$UNSUPPORTED)
prlog " [${color_blue}UNSUPPORTED${color_reset}]"
+ ktaptest 1 SKIP
UNSUPPORTED_CASES="$UNSUPPORTED_CASES $CASENO"
return $UNSUPPORTED_RESULT # depends on use case
;;
$XFAIL)
prlog " [${color_green}XFAIL${color_reset}]"
+ ktaptest 1 XFAIL
XFAILED_CASES="$XFAILED_CASES $CASENO"
return 0
;;
*)
prlog " [${color_blue}UNDEFINED${color_reset}]"
+ ktaptest 0 error
UNDEFINED_CASES="$UNDEFINED_CASES $CASENO"
return 1 # this must be a test bug
;;
@@ -371,6 +405,7 @@ __run_test() { # testfile
run_test() { # testfile
local testname=`basename $1`
testcase $1
+ prlog -n "[$CASENO]$INSTANCE$CASENAME"
if [ ! -z "$LOG_FILE" ] ; then
local testlog=`mktemp $LOG_DIR/${CASENO}-${testname}-log.XXXXXX`
else
@@ -405,6 +440,17 @@ run_test() { # testfile
# load in the helper functions
. $TEST_DIR/functions
+if [ "$KTAP" = "1" ]; then
+ echo "TAP version 13"
+
+ casecount=`echo $TEST_CASES | wc -w`
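+ # Tests flagged to also run in a tracing instance report a second
+ # result line, so count them again.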
+ for t in $TEST_CASES; do
+ test_on_instance $t || continue
+ casecount=$((casecount+1))
+ done
+ echo "1..${casecount}"
+fi
+
# Main loop
for t in $TEST_CASES; do
run_test $t
@@ -439,6 +485,17 @@ prlog "# of unsupported: " `echo $UNSUPPORTED_CASES | wc -w`
prlog "# of xfailed: " `echo $XFAILED_CASES | wc -w`
prlog "# of undefined(test bug): " `echo $UNDEFINED_CASES | wc -w`
+if [ "$KTAP" = "1" ]; then
+ echo -n "# Totals:"
+ echo -n " pass:"`echo $PASSED_CASES | wc -w`
+ echo -n " faii:"`echo $FAILED_CASES | wc -w`
+ echo -n " xfail:"`echo $XFAILED_CASES | wc -w`
+ echo -n " xpass:0"
+ echo -n " skip:"`echo $UNTESTED_CASES $UNSUPPORTED_CASES | wc -w`
+ echo -n " error:"`echo $UNRESOLVED_CASES $UNDEFINED_CASES | wc -w`
+ echo
+fi
+
cleanup
# if no error, return 0
diff --git a/tools/testing/selftests/ftrace/ftracetest-ktap b/tools/testing/selftests/ftrace/ftracetest-ktap
new file mode 100755
index 000000000000..b3284679ef3a
--- /dev/null
+++ b/tools/testing/selftests/ftrace/ftracetest-ktap
@@ -0,0 +1,8 @@
+#!/bin/sh -e
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# ftracetest-ktap: Wrapper to integrate ftracetest with the kselftest runner
+#
+# Copyright (C) Arm Ltd., 2023
+
+./ftracetest -K
diff --git a/tools/testing/selftests/media_tests/video_device_test.c b/tools/testing/selftests/media_tests/video_device_test.c
index 0f6aef2e2593..2c44e115f2f0 100644
--- a/tools/testing/selftests/media_tests/video_device_test.c
+++ b/tools/testing/selftests/media_tests/video_device_test.c
@@ -37,45 +37,58 @@
#include <time.h>
#include <linux/videodev2.h>
-int main(int argc, char **argv)
+#define PRIORITY_MAX 4
+
+int priority_test(int fd)
{
- int opt;
- char video_dev[256];
- int count;
- struct v4l2_tuner vtuner;
- struct v4l2_capability vcap;
+ /* This test tries to update the priority associated with a file descriptor */
+
+ enum v4l2_priority old_priority, new_priority, priority_to_compare;
int ret;
- int fd;
+ int result = 0;
- if (argc < 2) {
- printf("Usage: %s [-d </dev/videoX>]\n", argv[0]);
- exit(-1);
+ ret = ioctl(fd, VIDIOC_G_PRIORITY, &old_priority);
+ if (ret < 0) {
+ printf("Failed to get priority: %s\n", strerror(errno));
+ return -1;
+ }
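+ /* Pick a different, valid priority; V4L2 defines 4 priority levels (0-3) */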
+ new_priority = (old_priority + 1) % PRIORITY_MAX;
+ ret = ioctl(fd, VIDIOC_S_PRIORITY, &new_priority);
+ if (ret < 0) {
+ printf("Failed to set priority: %s\n", strerror(errno));
+ return -1;
+ }
+ ret = ioctl(fd, VIDIOC_G_PRIORITY, &priority_to_compare);
+ if (ret < 0) {
+ printf("Failed to get new priority: %s\n", strerror(errno));
+ result = -1;
+ goto cleanup;
+ }
+ if (priority_to_compare != new_priority) {
+ printf("Priority wasn't set - test failed\n");
+ result = -1;
}
- /* Process arguments */
- while ((opt = getopt(argc, argv, "d:")) != -1) {
- switch (opt) {
- case 'd':
- strncpy(video_dev, optarg, sizeof(video_dev) - 1);
- video_dev[sizeof(video_dev)-1] = '\0';
- break;
- default:
- printf("Usage: %s [-d </dev/videoX>]\n", argv[0]);
- exit(-1);
- }
+cleanup:
+ ret = ioctl(fd, VIDIOC_S_PRIORITY, &old_priority);
+ if (ret < 0) {
+ printf("Failed to restore priority: %s\n", strerror(errno));
+ return -1;
}
+ return result;
+}
+
+int loop_test(int fd)
+{
+ int count;
+ struct v4l2_tuner vtuner;
+ struct v4l2_capability vcap;
+ int ret;
/* Generate a random number of iterations */
srand((unsigned int) time(NULL));
count = rand();
- /* Open Video device and keep it open */
- fd = open(video_dev, O_RDWR);
- if (fd == -1) {
- printf("Video Device open errno %s\n", strerror(errno));
- exit(-1);
- }
-
printf("\nNote:\n"
"While test is running, remove the device or unbind\n"
"driver and ensure there are no use after free errors\n"
@@ -98,4 +111,46 @@ int main(int argc, char **argv)
sleep(10);
count--;
}
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int opt;
+ char video_dev[256];
+ int fd;
+ int test_result;
+
+ if (argc < 2) {
+ printf("Usage: %s [-d </dev/videoX>]\n", argv[0]);
+ exit(-1);
+ }
+
+ /* Process arguments */
+ while ((opt = getopt(argc, argv, "d:")) != -1) {
+ switch (opt) {
+ case 'd':
+ strncpy(video_dev, optarg, sizeof(video_dev) - 1);
+ video_dev[sizeof(video_dev)-1] = '\0';
+ break;
+ default:
+ printf("Usage: %s [-d </dev/videoX>]\n", argv[0]);
+ exit(-1);
+ }
+ }
+
+ /* Open Video device and keep it open */
+ fd = open(video_dev, O_RDWR);
+ if (fd == -1) {
+ printf("Video Device open errno %s\n", strerror(errno));
+ exit(-1);
+ }
+
+ test_result = priority_test(fd);
+ if (!test_result)
+ printf("Priority test - PASSED\n");
+ else
+ printf("Priority test - FAILED\n");
+
+ loop_test(fd);
}
diff --git a/tools/testing/selftests/prctl/set-anon-vma-name-test.c b/tools/testing/selftests/prctl/set-anon-vma-name-test.c
index 26d853c5a0c1..4275cb256dce 100644
--- a/tools/testing/selftests/prctl/set-anon-vma-name-test.c
+++ b/tools/testing/selftests/prctl/set-anon-vma-name-test.c
@@ -97,7 +97,7 @@ TEST_F(vma, renaming) {
TH_LOG("Try to pass invalid name (with non-printable character \\1) to rename the VMA");
EXPECT_EQ(rename_vma((unsigned long)self->ptr_anon, AREA_SIZE, BAD_NAME), -EINVAL);
- TH_LOG("Try to rename non-anonynous VMA");
+ TH_LOG("Try to rename non-anonymous VMA");
EXPECT_EQ(rename_vma((unsigned long) self->ptr_not_anon, AREA_SIZE, GOOD_NAME), -EINVAL);
}
diff --git a/tools/testing/selftests/sgx/Makefile b/tools/testing/selftests/sgx/Makefile
index 75af864e07b6..50aab6b57da3 100644
--- a/tools/testing/selftests/sgx/Makefile
+++ b/tools/testing/selftests/sgx/Makefile
@@ -17,6 +17,7 @@ ENCL_CFLAGS := -Wall -Werror -static -nostdlib -nostartfiles -fPIC \
-fno-stack-protector -mrdrnd $(INCLUDES)
TEST_CUSTOM_PROGS := $(OUTPUT)/test_sgx
+TEST_FILES := $(OUTPUT)/test_encl.elf
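+# Install the test enclave binary alongside test_sgx so the test can load it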
ifeq ($(CAN_BUILD_X86_64), 1)
all: $(TEST_CUSTOM_PROGS) $(OUTPUT)/test_encl.elf
diff --git a/tools/testing/selftests/vDSO/vdso_test_clock_getres.c b/tools/testing/selftests/vDSO/vdso_test_clock_getres.c
index 15dcee16ff72..38d46a8bf7cb 100644
--- a/tools/testing/selftests/vDSO/vdso_test_clock_getres.c
+++ b/tools/testing/selftests/vDSO/vdso_test_clock_getres.c
@@ -84,12 +84,12 @@ static inline int vdso_test_clock(unsigned int clock_id)
int main(int argc, char **argv)
{
- int ret;
+ int ret = 0;
#if _POSIX_TIMERS > 0
#ifdef CLOCK_REALTIME
- ret = vdso_test_clock(CLOCK_REALTIME);
+ ret += vdso_test_clock(CLOCK_REALTIME);
#endif
#ifdef CLOCK_BOOTTIME
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 598135d3162b..7e8c937627dd 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -18,7 +18,7 @@ TARGETS_C_32BIT_ONLY := entry_from_vm86 test_syscall_vdso unwind_vdso \
test_FCMOV test_FCOMI test_FISTTP \
vdso_restorer
TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip syscall_numbering \
- corrupt_xstate_header amx lam
+ corrupt_xstate_header amx lam test_shadow_stack
# Some selftests require 32bit support enabled also on 64bit systems
TARGETS_C_32BIT_NEEDED := ldt_gdt ptrace_syscall
diff --git a/tools/testing/selftests/x86/test_shadow_stack.c b/tools/testing/selftests/x86/test_shadow_stack.c
new file mode 100644
index 000000000000..94eb223456f6
--- /dev/null
+++ b/tools/testing/selftests/x86/test_shadow_stack.c
@@ -0,0 +1,695 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program tests basic kernel shadow stack support. It enables shadow
+ * stack manually via arch_prctl(), instead of relying on glibc. Its
+ * Makefile doesn't compile with shadow stack support, so it doesn't rely on
+ * any particular glibc. As a result it can't do any operations that require
+ * special glibc shadow stack support (longjmp(), swapcontext(), etc). Just
+ * stick to the basics and hope the compiler doesn't do anything strange.
+ */
+
+#define _GNU_SOURCE
+
+#include <sys/syscall.h>
+#include <asm/mman.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <x86intrin.h>
+#include <asm/prctl.h>
+#include <sys/prctl.h>
+#include <stdint.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sys/ioctl.h>
+#include <linux/userfaultfd.h>
+#include <setjmp.h>
+
+/*
+ * Define the ABI defines if needed, so people can run the tests
+ * without building the headers.
+ */
+#ifndef __NR_map_shadow_stack
+#define __NR_map_shadow_stack 451
+
+#define SHADOW_STACK_SET_TOKEN (1ULL << 0)
+
+#define ARCH_SHSTK_ENABLE 0x5001
+#define ARCH_SHSTK_DISABLE 0x5002
+#define ARCH_SHSTK_LOCK 0x5003
+#define ARCH_SHSTK_UNLOCK 0x5004
+#define ARCH_SHSTK_STATUS 0x5005
+
+#define ARCH_SHSTK_SHSTK (1ULL << 0)
+#define ARCH_SHSTK_WRSS (1ULL << 1)
+#endif
+
+#define SS_SIZE 0x200000
+
+#if (__GNUC__ < 8) || (__GNUC__ == 8 && __GNUC_MINOR__ < 5)
+int main(int argc, char *argv[])
+{
+ printf("[SKIP]\tCompiler does not support CET.\n");
+ return 0;
+}
+#else
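+/*
+ * Note that wrssq only works once ARCH_SHSTK_WRSS has been enabled via
+ * arch_prctl(), which main() does before the tests below run.
+ */
+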
+void write_shstk(unsigned long *addr, unsigned long val)
+{
+ asm volatile("wrssq %[val], (%[addr])\n"
+ : "=m" (addr)
+ : [addr] "r" (addr), [val] "r" (val));
+}
+
+static inline unsigned long __attribute__((always_inline)) get_ssp(void)
+{
+ unsigned long ret = 0;
+
+ asm volatile("xor %0, %0; rdsspq %0" : "=r" (ret));
+ return ret;
+}
+
+/*
+ * For use in inline enablement of shadow stack.
+ *
+ * The program can't return from the point where shadow stack gets enabled
+ * because there will be no address on the shadow stack. So it can't use
+ * syscall() for enablement, since it is a function.
+ *
+ * Based on code from nolibc.h. Keep a copy here because this can't pull in all
+ * of nolibc.h.
+ */
+#define ARCH_PRCTL(arg1, arg2) \
+({ \
+ long _ret; \
+ register long _num asm("eax") = __NR_arch_prctl; \
+ register long _arg1 asm("rdi") = (long)(arg1); \
+ register long _arg2 asm("rsi") = (long)(arg2); \
+ \
+ asm volatile ( \
+ "syscall\n" \
+ : "=a"(_ret) \
+ : "r"(_arg1), "r"(_arg2), \
+ "0"(_num) \
+ : "rcx", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
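+/*
+ * For example, as main() does below, shadow stack can then be enabled
+ * without making a function call:
+ *
+ * if (ARCH_PRCTL(ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK))
+ * return 1;
+ */
+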
+void *create_shstk(void *addr)
+{
+ return (void *)syscall(__NR_map_shadow_stack, addr, SS_SIZE, SHADOW_STACK_SET_TOKEN);
+}
+
+void *create_normal_mem(void *addr)
+{
+ return mmap(addr, SS_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+}
+
+void free_shstk(void *shstk)
+{
+ munmap(shstk, SS_SIZE);
+}
+
+int reset_shstk(void *shstk)
+{
+ return madvise(shstk, SS_SIZE, MADV_DONTNEED);
+}
+
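+/*
+ * Pivot to new_ssp: rstorssp consumes the restore token at new_ssp, and
+ * saveprevssp then saves a restore token for the old shadow stack so the
+ * switch can be reversed the same way.
+ */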
+void try_shstk(unsigned long new_ssp)
+{
+ unsigned long ssp;
+
+ printf("[INFO]\tnew_ssp = %lx, *new_ssp = %lx\n",
+ new_ssp, *((unsigned long *)new_ssp));
+
+ ssp = get_ssp();
+ printf("[INFO]\tchanging ssp from %lx to %lx\n", ssp, new_ssp);
+
+ asm volatile("rstorssp (%0)\n":: "r" (new_ssp));
+ asm volatile("saveprevssp");
+ printf("[INFO]\tssp is now %lx\n", get_ssp());
+
+ /*
+ * Switch back to the original shadow stack: saveprevssp left a restore
+ * token 8 bytes below the old SSP.
+ */
+ ssp -= 8;
+ asm volatile("rstorssp (%0)\n":: "r" (ssp));
+ asm volatile("saveprevssp");
+}
+
+int test_shstk_pivot(void)
+{
+ void *shstk = create_shstk(0);
+
+ if (shstk == MAP_FAILED) {
+ printf("[FAIL]\tError creating shadow stack: %d\n", errno);
+ return 1;
+ }
+ try_shstk((unsigned long)shstk + SS_SIZE - 8);
+ free_shstk(shstk);
+
+ printf("[OK]\tShadow stack pivot\n");
+ return 0;
+}
+
+int test_shstk_faults(void)
+{
+ unsigned long *shstk = create_shstk(0);
+
+ if (shstk == MAP_FAILED)
+ return 1;
+
+ /* Read the shadow stack, checking for zero so the read isn't optimized out */
+ if (*shstk != 0)
+ goto err;
+
+ /* Wrss memory that was already read. */
+ write_shstk(shstk, 1);
+ if (*shstk != 1)
+ goto err;
+
+ /* Page out memory, so we can wrss it again. */
+ if (reset_shstk((void *)shstk))
+ goto err;
+
+ write_shstk(shstk, 1);
+ if (*shstk != 1)
+ goto err;
+
+ printf("[OK]\tShadow stack faults\n");
+ return 0;
+
+err:
+ return 1;
+}
+
+unsigned long saved_ssp;
+unsigned long saved_ssp_val;
+volatile bool segv_triggered;
+
+void __attribute__((noinline)) violate_ss(void)
+{
+ saved_ssp = get_ssp();
+ saved_ssp_val = *(unsigned long *)saved_ssp;
+
+ /* Corrupt shadow stack */
+ printf("[INFO]\tCorrupting shadow stack\n");
+ write_shstk((void *)saved_ssp, 0);
+}
+
+void segv_handler(int signum, siginfo_t *si, void *uc)
+{
+ printf("[INFO]\tGenerated shadow stack violation successfully\n");
+
+ segv_triggered = true;
+
+ /* Fix shadow stack */
+ write_shstk((void *)saved_ssp, saved_ssp_val);
+}
+
+int test_shstk_violation(void)
+{
+ struct sigaction sa = {};
+
+ sa.sa_sigaction = segv_handler;
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGSEGV, &sa, NULL))
+ return 1;
+
+ segv_triggered = false;
+
+ /* Make sure segv_triggered is set before violate_ss() */
+ asm volatile("" : : : "memory");
+
+ violate_ss();
+
+ signal(SIGSEGV, SIG_DFL);
+
+ printf("[OK]\tShadow stack violation test\n");
+
+ return !segv_triggered;
+}
+
+/* Gup test state */
+#define MAGIC_VAL 0x12345678
+bool is_shstk_access;
+void *shstk_ptr;
+int fd;
+
+void reset_test_shstk(void *addr)
+{
+ if (shstk_ptr)
+ free_shstk(shstk_ptr);
+ shstk_ptr = create_shstk(addr);
+}
+
+void test_access_fix_handler(int signum, siginfo_t *si, void *uc)
+{
+ printf("[INFO]\tViolation from %s\n", is_shstk_access ? "shstk access" : "normal write");
+
+ segv_triggered = true;
+
+ /* Fix shadow stack */
+ if (is_shstk_access) {
+ reset_test_shstk(shstk_ptr);
+ return;
+ }
+
+ free_shstk(shstk_ptr);
+ create_normal_mem(shstk_ptr);
+}
+
+bool test_shstk_access(void *ptr)
+{
+ is_shstk_access = true;
+ segv_triggered = false;
+ write_shstk(ptr, MAGIC_VAL);
+
+ asm volatile("" : : : "memory");
+
+ return segv_triggered;
+}
+
+bool test_write_access(void *ptr)
+{
+ is_shstk_access = false;
+ segv_triggered = false;
+ *(unsigned long *)ptr = MAGIC_VAL;
+
+ asm volatile("" : : : "memory");
+
+ return segv_triggered;
+}
+
+bool gup_write(void *ptr)
+{
+ unsigned long val;
+
+ lseek(fd, (unsigned long)ptr, SEEK_SET);
+ if (write(fd, &val, sizeof(val)) < 0)
+ return 1;
+
+ return 0;
+}
+
+bool gup_read(void *ptr)
+{
+ unsigned long val;
+
+ lseek(fd, (unsigned long)ptr, SEEK_SET);
+ if (read(fd, &val, sizeof(val)) < 0)
+ return 1;
+
+ return 0;
+}
+
+int test_gup(void)
+{
+ struct sigaction sa = {};
+ int status;
+ pid_t pid;
+
+ sa.sa_sigaction = test_access_fix_handler;
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGSEGV, &sa, NULL))
+ return 1;
+
+ segv_triggered = false;
+
+ fd = open("/proc/self/mem", O_RDWR);
+ if (fd == -1)
+ return 1;
+
+ reset_test_shstk(0);
+ if (gup_read(shstk_ptr))
+ return 1;
+ if (test_shstk_access(shstk_ptr))
+ return 1;
+ printf("[INFO]\tGup read -> shstk access success\n");
+
+ reset_test_shstk(0);
+ if (gup_write(shstk_ptr))
+ return 1;
+ if (test_shstk_access(shstk_ptr))
+ return 1;
+ printf("[INFO]\tGup write -> shstk access success\n");
+
+ reset_test_shstk(0);
+ if (gup_read(shstk_ptr))
+ return 1;
+ if (!test_write_access(shstk_ptr))
+ return 1;
+ printf("[INFO]\tGup read -> write access success\n");
+
+ reset_test_shstk(0);
+ if (gup_write(shstk_ptr))
+ return 1;
+ if (!test_write_access(shstk_ptr))
+ return 1;
+ printf("[INFO]\tGup write -> write access success\n");
+
+ close(fd);
+
+ /*
+ * COW/gup test: the child writes to the shadow stack through
+ * /proc/self/mem (a forced gup write). The write must be served by
+ * copy-on-write instead of reaching the parent's mapping, which is
+ * checked after waitpid().
+ */
+ reset_test_shstk(0);
+ pid = fork();
+ if (!pid) {
+ fd = open("/proc/self/mem", O_RDWR);
+ if (fd == -1)
+ exit(1);
+
+ if (gup_write(shstk_ptr)) {
+ close(fd);
+ exit(1);
+ }
+ close(fd);
+ exit(0);
+ }
+ waitpid(pid, &status, 0);
+ if (WEXITSTATUS(status)) {
+ printf("[FAIL]\tWrite in child failed\n");
+ return 1;
+ }
+ if (*(unsigned long *)shstk_ptr == MAGIC_VAL) {
+ printf("[FAIL]\tWrite in child wrote through to shared memory\n");
+ return 1;
+ }
+
+ printf("[INFO]\tCow gup write -> write access success\n");
+
+ free_shstk(shstk_ptr);
+
+ signal(SIGSEGV, SIG_DFL);
+
+ printf("[OK]\tShadow gup test\n");
+
+ return 0;
+}
+
+int test_mprotect(void)
+{
+ struct sigaction sa = {};
+
+ sa.sa_sigaction = test_access_fix_handler;
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGSEGV, &sa, NULL))
+ return 1;
+
+ segv_triggered = false;
+
+ /* mprotect a shadow stack as read only */
+ reset_test_shstk(0);
+ if (mprotect(shstk_ptr, SS_SIZE, PROT_READ) < 0) {
+ printf("[FAIL]\tmprotect(PROT_READ) failed\n");
+ return 1;
+ }
+
+ /* try to wrss it and fail */
+ if (!test_shstk_access(shstk_ptr)) {
+ printf("[FAIL]\tShadow stack access to read-only memory succeeded\n");
+ return 1;
+ }
+
+ /*
+ * The shadow stack was reset above to resolve the fault; make the new one
+ * read-only.
+ */
+ if (mprotect(shstk_ptr, SS_SIZE, PROT_READ) < 0) {
+ printf("[FAIL]\tmprotect(PROT_READ) failed\n");
+ return 1;
+ }
+
+ /* then back to writable */
+ if (mprotect(shstk_ptr, SS_SIZE, PROT_WRITE | PROT_READ) < 0) {
+ printf("[FAIL]\tmprotect(PROT_WRITE) failed\n");
+ return 1;
+ }
+
+ /* then wrss to it and succeed */
+ if (test_shstk_access(shstk_ptr)) {
+ printf("[FAIL]\tShadow stack access to mprotect() writable memory failed\n");
+ return 1;
+ }
+
+ free_shstk(shstk_ptr);
+
+ signal(SIGSEGV, SIG_DFL);
+
+ printf("[OK]\tmprotect() test\n");
+
+ return 0;
+}
+
+char zero[4096];
+
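+/*
+ * Minimal userfaultfd handler thread: take one missing-page event and
+ * resolve it by copying in a page of zeros with UFFDIO_COPY.
+ */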
+static void *uffd_thread(void *arg)
+{
+ struct uffdio_copy req;
+ int uffd = *(int *)arg;
+ struct uffd_msg msg;
+
+ if (read(uffd, &msg, sizeof(msg)) <= 0)
+ return (void *)1;
+
+ req.dst = msg.arg.pagefault.address;
+ req.src = (__u64)zero;
+ req.len = 4096;
+ req.mode = 0;
+
+ if (ioctl(uffd, UFFDIO_COPY, &req))
+ return (void *)1;
+
+ return (void *)0;
+}
+
+int test_userfaultfd(void)
+{
+ struct uffdio_register uffdio_register;
+ struct uffdio_api uffdio_api;
+ struct sigaction sa = {};
+ pthread_t thread;
+ void *res;
+ int uffd;
+
+ sa.sa_sigaction = test_access_fix_handler;
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGSEGV, &sa, NULL))
+ return 1;
+
+ uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+ if (uffd < 0) {
+ printf("[SKIP]\tUserfaultfd unavailable.\n");
+ return 0;
+ }
+
+ reset_test_shstk(0);
+
+ uffdio_api.api = UFFD_API;
+ uffdio_api.features = 0;
+ if (ioctl(uffd, UFFDIO_API, &uffdio_api))
+ goto err;
+
+ uffdio_register.range.start = (__u64)shstk_ptr;
+ uffdio_register.range.len = 4096;
+ uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+ if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
+ goto err;
+
+ if (pthread_create(&thread, NULL, &uffd_thread, &uffd))
+ goto err;
+
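+ /*
+ * Page the shadow stack out, then touch it with wrss: the resulting
+ * missing-page fault is resolved by uffd_thread() with UFFDIO_COPY.
+ */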
+ reset_shstk(shstk_ptr);
+ test_shstk_access(shstk_ptr);
+
+ if (pthread_join(thread, &res))
+ goto err;
+
+ if (test_shstk_access(shstk_ptr))
+ goto err;
+
+ free_shstk(shstk_ptr);
+
+ signal(SIGSEGV, SIG_DFL);
+
+ if (!res)
+ printf("[OK]\tUserfaultfd test\n");
+ return !!res;
+err:
+ free_shstk(shstk_ptr);
+ close(uffd);
+ signal(SIGSEGV, SIG_DFL);
+ return 1;
+}
+
+/*
+ * Too complicated to pull this out of the 32 bit headers while also getting
+ * the 64 bit ones needed above, so just define a copy of the compat
+ * sigaction syscall number here.
+ */
+#define __NR_compat_sigaction 67
+
+/*
+ * Register the handler via the 32 bit compat sigaction syscall so signals
+ * are delivered with the 32 bit ABI. Make sure to list the registers that
+ * can get clobbered.
+ */
+int sigaction32(int signum, const struct sigaction *restrict act,
+ struct sigaction *restrict oldact)
+{
+ register long syscall_reg asm("eax") = __NR_compat_sigaction;
+ register long signum_reg asm("ebx") = signum;
+ register long act_reg asm("ecx") = (long)act;
+ register long oldact_reg asm("edx") = (long)oldact;
+ int ret = 0;
+
+ asm volatile ("int $0x80;"
+ : "=a"(ret), "=m"(oldact)
+ : "r"(syscall_reg), "r"(signum_reg), "r"(act_reg),
+ "r"(oldact_reg)
+ : "r8", "r9", "r10", "r11"
+ );
+
+ return ret;
+}
+
+sigjmp_buf jmp_buffer;
+
+void segv_gp_handler(int signum, siginfo_t *si, void *uc)
+{
+ segv_triggered = true;
+
+ /*
+ * To work with old glibc, this can't rely on siglongjmp working with
+ * shadow stack enabled, so disable shadow stack before siglongjmp().
+ */
+ ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK);
+ siglongjmp(jmp_buffer, -1);
+}
+
+/*
+ * Transition to 32 bit mode and check that a #GP triggers a segfault: 32 bit
+ * mode is not supported with shadow stack, so delivering a signal to a 32 bit
+ * handler should fault.
+ */
+int test_32bit(void)
+{
+ struct sigaction sa = {};
+ struct sigaction *sa32;
+
+ /* Create sigaction in 32 bit address range */
+ sa32 = mmap(0, 4096, PROT_READ | PROT_WRITE,
+ MAP_32BIT | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ if (sa32 == MAP_FAILED)
+ return 1;
+ sa32->sa_flags = SA_SIGINFO;
+
+ sa.sa_sigaction = segv_gp_handler;
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGSEGV, &sa, NULL))
+ return 1;
+
+ segv_triggered = false;
+
+ /* Make sure segv_triggered is set before triggering the #GP */
+ asm volatile("" : : : "memory");
+
+ /*
+ * Set handler to somewhere in 32 bit address space
+ */
+ sa32->sa_handler = (void *)sa32;
+ if (sigaction32(SIGUSR1, sa32, NULL))
+ return 1;
+
+ if (!sigsetjmp(jmp_buffer, 1))
+ raise(SIGUSR1);
+
+ if (segv_triggered)
+ printf("[OK]\t32 bit test\n");
+
+ return !segv_triggered;
+}
+
+int main(int argc, char *argv[])
+{
+ int ret = 0;
+
+ if (ARCH_PRCTL(ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK)) {
+ printf("[SKIP]\tCould not enable Shadow stack\n");
+ return 1;
+ }
+
+ if (ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK)) {
+ ret = 1;
+ printf("[FAIL]\tDisabling shadow stack failed\n");
+ }
+
+ if (ARCH_PRCTL(ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK)) {
+ printf("[SKIP]\tCould not re-enable Shadow stack\n");
+ return 1;
+ }
+
+ if (ARCH_PRCTL(ARCH_SHSTK_ENABLE, ARCH_SHSTK_WRSS)) {
+ printf("[SKIP]\tCould not enable WRSS\n");
+ ret = 1;
+ goto out;
+ }
+
+ /* Should have succeeded if here, but this is a test, so double check. */
+ if (!get_ssp()) {
+ printf("[FAIL]\tShadow stack disabled\n");
+ return 1;
+ }
+
+ if (test_shstk_pivot()) {
+ ret = 1;
+ printf("[FAIL]\tShadow stack pivot\n");
+ goto out;
+ }
+
+ if (test_shstk_faults()) {
+ ret = 1;
+ printf("[FAIL]\tShadow stack fault test\n");
+ goto out;
+ }
+
+ if (test_shstk_violation()) {
+ ret = 1;
+ printf("[FAIL]\tShadow stack violation test\n");
+ goto out;
+ }
+
+ if (test_gup()) {
+ ret = 1;
+ printf("[FAIL]\tShadow shadow stack gup\n");
+ goto out;
+ }
+
+ if (test_mprotect()) {
+ ret = 1;
+ printf("[FAIL]\tShadow shadow mprotect test\n");
+ goto out;
+ }
+
+ if (test_userfaultfd()) {
+ ret = 1;
+ printf("[FAIL]\tUserfaultfd test\n");
+ goto out;
+ }
+
+ if (test_32bit()) {
+ ret = 1;
+ printf("[FAIL]\t32 bit test\n");
+ }
+
+ return ret;
+
+out:
+ /*
+ * Disable shadow stack before the function returns, or there will be a
+ * shadow stack violation.
+ */
+ if (ARCH_PRCTL(ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK)) {
+ ret = 1;
+ printf("[FAIL]\tDisabling shadow stack failed\n");
+ }
+
+ return ret;
+}
+#endif
diff --git a/tools/virtio/ringtest/.gitignore b/tools/virtio/ringtest/.gitignore
new file mode 100644
index 000000000000..4c6f4bad6b5d
--- /dev/null
+++ b/tools/virtio/ringtest/.gitignore
@@ -0,0 +1,6 @@
+/noring
+/ptr_ring
+/ring
+/virtio_ring_0_9
+/virtio_ring_inorder
+/virtio_ring_poll