author     Maxime Ripard <maxime@cerno.tech>  2022-05-03 11:53:42 +0200
committer  Maxime Ripard <maxime@cerno.tech>  2022-05-03 11:53:42 +0200
commit     b812f646bb818ca0e1806072eb7f0006f3a65dde (patch)
tree       3ba2134dcefd8127f5ad286e0fe1135588fbd0b3
parent     a5fc012e6ee75a899173398573e77207542f588a (diff)
parent     e954d2c94d007afe487044ecfa48f2518643df0e (diff)
Merge drm/drm-next into drm-misc-next
Christian needs a backmerge to avoid a merge conflict for amdgpu.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
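For reference, a backmerge of this kind is an ordinary merge of the upstream integration branch into the topic tree. A minimal sketch of how such a merge commit is produced, assuming a local drm-misc-next checkout with an "upstream" remote carrying drm-next (both names are assumptions, not taken from this page):

    # Sketch only: the remote and branch names here are hypothetical.
    git checkout drm-misc-next
    git fetch upstream                  # pick up the current drm-next head
    git merge upstream/drm-next         # creates a two-parent merge commit,
                                        # here recorded as b812f646bb81

By git merge semantics, the two "parent" lines in the header above are the previous drm-misc-next head (first parent, a5fc012e6ee7) and the merged drm-next head (second parent, e954d2c94d00).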
-rw-r--r--Documentation/ABI/testing/sysfs-class-firmware-attributes48
-rw-r--r--Documentation/ABI/testing/sysfs-driver-intel_sdsi18
-rw-r--r--Documentation/ABI/testing/sysfs-fs-erofs5
-rw-r--r--Documentation/arm64/memory-tagging-extension.rst4
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml1
-rw-r--r--Documentation/devicetree/bindings/bus/ti-sysc.yaml1
-rw-r--r--Documentation/devicetree/bindings/clock/microchip,mpfs.yaml13
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynos-audss-clock.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynos-clock.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynos-ext-clock.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynos4412-isp-clock.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynos5260-clock.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynos5410-clock.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynos5433-clock.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynos7-clock.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynos7885-clock.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynos850-clock.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,s2mps11.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,s5pv210-audss-clock.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.yaml2
-rw-r--r--Documentation/devicetree/bindings/devfreq/event/samsung,exynos-nocp.yaml2
-rw-r--r--Documentation/devicetree/bindings/devfreq/event/samsung,exynos-ppmu.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/msm/dpu-qcm2290.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-timing.yaml42
-rw-r--r--Documentation/devicetree/bindings/display/samsung/samsung,exynos-hdmi-ddc.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/samsung/samsung,exynos-hdmi.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/samsung/samsung,exynos5433-decon.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/samsung/samsung,exynos5433-mic.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/samsung/samsung,exynos7-decon.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/samsung/samsung,fimd.yaml2
-rw-r--r--Documentation/devicetree/bindings/dma/qcom,gpi.yaml1
-rw-r--r--Documentation/devicetree/bindings/extcon/maxim,max77843.yaml4
-rw-r--r--Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml5
-rw-r--r--Documentation/devicetree/bindings/hwmon/lltc,ltc4151.yaml2
-rw-r--r--Documentation/devicetree/bindings/hwmon/microchip,mcp3021.yaml2
-rw-r--r--Documentation/devicetree/bindings/hwmon/sensirion,sht15.yaml2
-rw-r--r--Documentation/devicetree/bindings/hwmon/ti,tmp102.yaml2
-rw-r--r--Documentation/devicetree/bindings/hwmon/ti,tmp108.yaml2
-rw-r--r--Documentation/devicetree/bindings/hwmon/ti,tmp464.yaml5
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml2
-rw-r--r--Documentation/devicetree/bindings/i2c/samsung,s3c2410-i2c.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7476.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml12
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ad5360.yaml6
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml84
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/mrvl,intc.yaml6
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.yaml2
-rw-r--r--Documentation/devicetree/bindings/leds/maxim,max77693.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/coda.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/mediatek,vcodec-encoder.yaml3
-rw-r--r--Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml1
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.yaml2
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2-timings.yaml2
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2.yaml2
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr3-timings.yaml2
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr3.yaml2
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/marvell,mvebu-sdram-controller.yaml2
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/qca,ath79-ddr-controller.yaml2
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/renesas,h8300-bsc.yaml2
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/samsung,exynos5422-dmc.yaml2
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/synopsys,ddrc-ecc.yaml8
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/ti,da8xx-ddrctl.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/atmel-flexcom.txt2
-rw-r--r--Documentation/devicetree/bindings/mfd/maxim,max14577.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/maxim,max77686.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/maxim,max77693.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/maxim,max77802.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/maxim,max77843.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/samsung,s2mpa01.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml2
-rw-r--r--Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/dsa/realtek.yaml35
-rw-r--r--Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/nfc/nxp,pn532.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/nfc/nxp,pn544.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/nfc/st,st-nci.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/nfc/st,st21nfca.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/nfc/st,st95hf.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/snps,dwmac.yaml6
-rw-r--r--Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml57
-rw-r--r--Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/nvidia,tegra20-usb-phy.yaml20
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml32
-rw-r--r--Documentation/devicetree/bindings/phy/samsung,dp-video-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/samsung,exynos-hdmi-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/samsung,exynos5250-sata-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/samsung,usb2-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml11
-rw-r--r--Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-gpio-bank.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-pins-cfg.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-wakeup-interrupt.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/samsung,pinctrl.yaml2
-rw-r--r--Documentation/devicetree/bindings/power/renesas,apmu.yaml3
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq2415x.yaml1
-rw-r--r--Documentation/devicetree/bindings/power/supply/maxim,max14577.yaml2
-rw-r--r--Documentation/devicetree/bindings/power/supply/maxim,max77693.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/fixed-regulator.yaml34
-rw-r--r--Documentation/devicetree/bindings/regulator/maxim,max14577.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/maxim,max77686.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/maxim,max77693.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/maxim,max77802.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/maxim,max77843.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/maxim,max8952.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/maxim,max8973.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/maxim,max8997.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/richtek,rt5190a-regulator.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/samsung,s2mpa01.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/samsung,s2mps11.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/samsung,s2mps13.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/samsung,s2mps15.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/samsung,s2mpu02.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml2
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml4
-rw-r--r--Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.yaml5
-rw-r--r--Documentation/devicetree/bindings/reset/socionext,uniphier-reset.yaml3
-rw-r--r--Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.yaml2
-rw-r--r--Documentation/devicetree/bindings/rng/timeriomem_rng.yaml2
-rw-r--r--Documentation/devicetree/bindings/rtc/microchip,mfps-rtc.yaml15
-rw-r--r--Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/samsung,arndale.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/samsung,smdk5250.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/samsung,snow.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/samsung,tm2.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/st,stm32-sai.yaml6
-rw-r--r--Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/samsung,spi-peripheral-props.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/samsung,spi.yaml2
-rw-r--r--Documentation/devicetree/bindings/sram/sram.yaml16
-rw-r--r--Documentation/devicetree/bindings/thermal/samsung,exynos-thermal.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/samsung,exynos-usb2.yaml3
-rw-r--r--Documentation/filesystems/caching/backend-api.rst8
-rw-r--r--Documentation/filesystems/caching/netfs-api.rst25
-rw-r--r--Documentation/filesystems/ext4/attributes.rst2
-rw-r--r--Documentation/filesystems/f2fs.rst70
-rw-r--r--Documentation/gpu/amdgpu/amdgpu-glossary.rst13
-rw-r--r--Documentation/gpu/drm-usage-stats.rst112
-rw-r--r--Documentation/gpu/i915.rst28
-rw-r--r--Documentation/gpu/index.rst1
-rw-r--r--Documentation/networking/bonding.rst4
-rw-r--r--Documentation/networking/ip-sysctl.rst7
-rw-r--r--Documentation/security/siphash.rst46
-rw-r--r--Documentation/virt/kvm/api.rst25
-rw-r--r--Documentation/virt/kvm/vcpu-requests.rst2
-rw-r--r--Documentation/virt/kvm/x86/amd-memory-encryption.rst2
-rw-r--r--Documentation/virt/kvm/x86/errata.rst2
-rw-r--r--Documentation/virt/kvm/x86/running-nested-guests.rst2
-rw-r--r--Documentation/vm/page_owner.rst5
-rw-r--r--MAINTAINERS137
-rw-r--r--Makefile2
-rw-r--r--arch/Kconfig6
-rw-r--r--arch/arc/boot/dts/hsdk.dts2
-rw-r--r--arch/arc/include/asm/atomic-llsc.h32
-rw-r--r--arch/arc/include/asm/pgtable-levels.h3
-rw-r--r--arch/arc/kernel/disasm.c3
-rw-r--r--arch/arc/kernel/entry.S1
-rw-r--r--arch/arc/kernel/signal.c2
-rw-r--r--arch/arc/kernel/smp.c4
-rw-r--r--arch/arc/kernel/unaligned.c2
-rw-r--r--arch/arc/mm/cache.c2
-rw-r--r--arch/arm/boot/dts/am33xx-l4.dtsi2
-rw-r--r--arch/arm/boot/dts/am3517-evm.dts45
-rw-r--r--arch/arm/boot/dts/am3517-som.dtsi9
-rw-r--r--arch/arm/boot/dts/at91-dvk_su60_somc.dtsi2
-rw-r--r--arch/arm/boot/dts/at91-q5xr5.dts2
-rw-r--r--arch/arm/boot/dts/at91-sam9_l9260.dts2
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi2
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts2
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_xplained.dts2
-rw-r--r--arch/arm/boot/dts/at91-sama5d3_xplained.dts8
-rw-r--r--arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi2
-rw-r--r--arch/arm/boot/dts/at91-sama5d4_xplained.dts6
-rw-r--r--arch/arm/boot/dts/at91-sama5d4ek.dts2
-rw-r--r--arch/arm/boot/dts/at91-sama7g5ek.dts4
-rw-r--r--arch/arm/boot/dts/at91-vinco.dts2
-rw-r--r--arch/arm/boot/dts/at91rm9200ek.dts4
-rw-r--r--arch/arm/boot/dts/at91sam9260ek.dts2
-rw-r--r--arch/arm/boot/dts/at91sam9261ek.dts2
-rw-r--r--arch/arm/boot/dts/at91sam9263ek.dts2
-rw-r--r--arch/arm/boot/dts/at91sam9g20ek_common.dtsi45
-rw-r--r--arch/arm/boot/dts/at91sam9m10g45ek.dts2
-rw-r--r--arch/arm/boot/dts/at91sam9n12ek.dts2
-rw-r--r--arch/arm/boot/dts/at91sam9rlek.dts2
-rw-r--r--arch/arm/boot/dts/at91sam9x5ek.dtsi2
-rw-r--r--arch/arm/boot/dts/da850-evm.dts2
-rw-r--r--arch/arm/boot/dts/dm8168-evm.dts2
-rw-r--r--arch/arm/boot/dts/dra7-l4.dtsi4
-rw-r--r--arch/arm/boot/dts/imx28-ts4600.dts2
-rw-r--r--arch/arm/boot/dts/imx6qdl-apalis.dtsi10
-rw-r--r--arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi32
-rw-r--r--arch/arm/boot/dts/imx6ul-phytec-segin-peb-av-02.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6ull-colibri.dtsi2
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts15
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts15
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv.dtsi15
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi4
-rw-r--r--arch/arm/boot/dts/omap3-gta04.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-pins.dtsi12
-rw-r--r--arch/arm/boot/dts/qcom-ipq8064.dtsi8
-rw-r--r--arch/arm/boot/dts/sama5d3xmb.dtsi2
-rw-r--r--arch/arm/boot/dts/sama5d3xmb_cmp.dtsi2
-rw-r--r--arch/arm/boot/dts/sama7g5.dtsi18
-rw-r--r--arch/arm/boot/dts/spear1310-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear1340-evb.dts2
-rw-r--r--arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi2
-rw-r--r--arch/arm/boot/dts/stm32mp157c-ev1.dts4
-rw-r--r--arch/arm/boot/dts/usb_a9263.dts2
-rw-r--r--arch/arm/configs/gemini_defconfig5
-rw-r--r--arch/arm/configs/imote2_defconfig365
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/configs/tegra_defconfig3
-rw-r--r--arch/arm/configs/u8500_defconfig19
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c4
-rw-r--r--arch/arm/mach-ep93xx/clock.c6
-rw-r--r--arch/arm/mach-exynos/Kconfig1
-rw-r--r--arch/arm/mach-iop32x/cp6.c2
-rw-r--r--arch/arm/mach-omap2/omap4-common.c2
-rw-r--r--arch/arm/mach-vexpress/spc.c28
-rw-r--r--arch/arm/xen/enlighten.c9
-rw-r--r--arch/arm64/Kconfig15
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi40
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi40
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-s4.dtsi8
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1.dtsi20
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi8
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi8
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mn.dtsi10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts2
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h28
-rw-r--r--arch/arm64/include/asm/kvm_host.h10
-rw-r--r--arch/arm64/include/asm/pgtable.h4
-rw-r--r--arch/arm64/kernel/elfcore.c2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/host.S18
-rw-r--r--arch/arm64/kvm/inject_fault.c28
-rw-r--r--arch/arm64/kvm/mmu.c30
-rw-r--r--arch/arm64/kvm/pmu-emul.c23
-rw-r--r--arch/arm64/kvm/psci.c34
-rw-r--r--arch/arm64/kvm/reset.c65
-rw-r--r--arch/arm64/kvm/vgic/vgic-debug.c10
-rw-r--r--arch/arm64/kvm/vgic/vgic-its.c2
-rw-r--r--arch/powerpc/kernel/module.c2
-rw-r--r--arch/powerpc/kernel/time.c29
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c9
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c45
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c44
-rw-r--r--arch/powerpc/kvm/book3s_hv_nested.c16
-rw-r--r--arch/powerpc/kvm/book3s_rtas.c4
-rw-r--r--arch/powerpc/kvm/powerpc.c4
-rw-r--r--arch/powerpc/perf/Makefile4
-rw-r--r--arch/powerpc/perf/power10-pmu.c2
-rw-r--r--arch/powerpc/perf/power9-pmu.c8
-rw-r--r--arch/riscv/Kconfig.socs2
-rw-r--r--arch/riscv/boot/dts/microchip/microchip-mpfs-fabric.dtsi16
-rw-r--r--arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts2
-rw-r--r--arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi10
-rw-r--r--arch/riscv/configs/defconfig1
-rw-r--r--arch/riscv/configs/rv32_defconfig1
-rw-r--r--arch/riscv/include/asm/kvm_host.h3
-rw-r--r--arch/riscv/kernel/patch.c2
-rw-r--r--arch/riscv/kvm/vcpu.c39
-rw-r--r--arch/riscv/kvm/vcpu_exit.c4
-rw-r--r--arch/riscv/kvm/vcpu_fp.c1
-rw-r--r--arch/riscv/kvm/vcpu_sbi.c5
-rw-r--r--arch/riscv/mm/init.c1
-rw-r--r--arch/s390/Kconfig19
-rw-r--r--arch/s390/Makefile2
-rw-r--r--arch/s390/configs/debug_defconfig8
-rw-r--r--arch/s390/configs/defconfig6
-rw-r--r--arch/s390/configs/zfcpdump_defconfig6
-rw-r--r--arch/s390/include/asm/entry-common.h2
-rw-r--r--arch/s390/include/asm/processor.h8
-rw-r--r--arch/s390/include/asm/stacktrace.h2
-rw-r--r--arch/s390/kernel/machine_kexec.c2
-rw-r--r--arch/s390/kernel/processor.c4
-rw-r--r--arch/s390/kvm/interrupt.c4
-rw-r--r--arch/s390/kvm/kvm-s390.c8
-rw-r--r--arch/s390/kvm/pv.c7
-rw-r--r--arch/s390/kvm/vsie.c4
-rw-r--r--arch/s390/lib/test_unwind.c2
-rw-r--r--arch/sparc/include/asm/cacheflush_32.h1
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/entry/entry_64.S3
-rw-r--r--arch/x86/events/intel/cstate.c7
-rw-r--r--arch/x86/include/asm/compat.h6
-rw-r--r--arch/x86/include/asm/intel-family.h3
-rw-r--r--arch/x86/include/asm/io.h2
-rw-r--r--arch/x86/include/asm/kvm-x86-ops.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h11
-rw-r--r--arch/x86/include/asm/microcode.h2
-rw-r--r--arch/x86/include/asm/msr-index.h4
-rw-r--r--arch/x86/include/asm/pgtable_types.h4
-rw-r--r--arch/x86/include/asm/static_call.h1
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/cpu/cpu.h5
-rw-r--r--arch/x86/kernel/cpu/intel.c7
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c6
-rw-r--r--arch/x86/kernel/cpu/tsx.c104
-rw-r--r--arch/x86/kernel/crash_dump_64.c1
-rw-r--r--arch/x86/kernel/kvm.c13
-rw-r--r--arch/x86/kernel/unwind_orc.c8
-rw-r--r--arch/x86/kvm/cpuid.c19
-rw-r--r--arch/x86/kvm/hyperv.c40
-rw-r--r--arch/x86/kvm/hyperv.h2
-rw-r--r--arch/x86/kvm/mmu.h24
-rw-r--r--arch/x86/kvm/mmu/mmu.c77
-rw-r--r--arch/x86/kvm/mmu/spte.h6
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c17
-rw-r--r--arch/x86/kvm/pmu.h9
-rw-r--r--arch/x86/kvm/svm/avic.c3
-rw-r--r--arch/x86/kvm/svm/pmu.c1
-rw-r--r--arch/x86/kvm/svm/sev.c70
-rw-r--r--arch/x86/kvm/svm/svm.c1
-rw-r--r--arch/x86/kvm/svm/svm.h2
-rw-r--r--arch/x86/kvm/vmx/nested.c5
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c8
-rw-r--r--arch/x86/kvm/vmx/vmx.c5
-rw-r--r--arch/x86/kvm/vmx/vmx.h1
-rw-r--r--arch/x86/kvm/x86.c95
-rw-r--r--arch/x86/lib/copy_user_64.S87
-rw-r--r--arch/x86/lib/putuser.S4
-rw-r--r--arch/x86/lib/retpoline.S2
-rw-r--r--arch/x86/lib/usercopy_64.c2
-rw-r--r--arch/x86/mm/pat/set_memory.c11
-rw-r--r--arch/x86/pci/xen.c6
-rw-r--r--arch/x86/platform/pvh/head.S1
-rw-r--r--arch/x86/power/cpu.c10
-rw-r--r--arch/x86/xen/xen-head.S1
-rw-r--r--arch/xtensa/kernel/coprocessor.S4
-rw-r--r--arch/xtensa/kernel/jump_label.c2
-rw-r--r--arch/xtensa/platforms/iss/console.c8
-rw-r--r--block/bfq-iosched.c12
-rw-r--r--block/bio.c2
-rw-r--r--block/blk-iocost.c12
-rw-r--r--block/blk-mq.c12
-rw-r--r--block/ioctl.c2
-rw-r--r--drivers/acpi/processor_idle.c8
-rw-r--r--drivers/android/binder.c10
-rw-r--r--drivers/ata/pata_marvell.c2
-rw-r--r--drivers/base/arch_topology.c11
-rw-r--r--drivers/base/dd.c1
-rw-r--r--drivers/base/topology.c10
-rw-r--r--drivers/block/Kconfig16
-rw-r--r--drivers/block/floppy.c43
-rw-r--r--drivers/block/null_blk/main.c2
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-msi.c6
-rw-r--r--drivers/bus/imx-weim.c5
-rw-r--r--drivers/bus/mhi/host/pci_generic.c2
-rw-r--r--drivers/bus/sunxi-rsb.c2
-rw-r--r--drivers/bus/ti-sysc.c16
-rw-r--r--drivers/char/random.c49
-rw-r--r--drivers/clk/microchip/clk-mpfs.c195
-rw-r--r--drivers/clk/qcom/clk-rcg2.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-rtc.c2
-rw-r--r--drivers/clk/sunxi/clk-sun9i-mmc.c2
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c70
-rw-r--r--drivers/cpufreq/sun50i-cpufreq-nvmem.c4
-rw-r--r--drivers/cpuidle/cpuidle-riscv-sbi.c1
-rw-r--r--drivers/dma/at_xdmac.c12
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.c16
-rw-r--r--drivers/dma/idxd/device.c6
-rw-r--r--drivers/dma/idxd/submit.c5
-rw-r--r--drivers/dma/idxd/sysfs.c6
-rw-r--r--drivers/dma/imx-sdma.c32
-rw-r--r--drivers/dma/mediatek/mtk-uart-apdma.c9
-rw-r--r--drivers/edac/synopsys_edac.c16
-rw-r--r--drivers/firmware/arm_scmi/clock.c5
-rw-r--r--drivers/firmware/arm_scmi/driver.c3
-rw-r--r--drivers/firmware/arm_scmi/optee.c8
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c3
-rw-r--r--drivers/gpio/gpio-sim.c4
-rw-r--r--drivers/gpio/gpiolib-acpi.c22
-rw-r--r--drivers/gpio/gpiolib.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_df.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c131
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c308
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.h (renamed from drivers/gpu/drm/radeon/r600_blit_shaders.h)28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c1132
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c977
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h99
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v1_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_1.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_1.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v1_7.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v3_6.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_7.c106
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c27
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c83
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c152
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c49
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c13
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c10
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c37
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c83
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c37
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c198
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c91
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/Makefile26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c363
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/Makefile26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c235
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn316/Makefile26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c234
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c863
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_hwss.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h48
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h1
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c76
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c38
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h8
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c6
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h12
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h2
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c38
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h1
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h66
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h13
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_sh_mask.h24
-rw-r--r--drivers/gpu/drm/amd/include/discovery.h2
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c41
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c87
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h2
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c10
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c43
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c27
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c147
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c31
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c16
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c8
-rw-r--r--drivers/gpu/drm/drm_of.c84
-rw-r--r--drivers/gpu/drm/i915/Kconfig3
-rw-r--r--drivers/gpu/drm/i915/Makefile10
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c34
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c56
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c38
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c70
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_create.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c15
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c14
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.c50
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.h7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c144
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.h4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c52
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.h1
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c6
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c12
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c5
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.c2
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.c113
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_engine_cs.h4
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c40
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_regs.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_user.c11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c49
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c693
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h29
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.c224
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.h37
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c223
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h41
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.c20
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_gmch.c654
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_gmch.h46
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c40
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs.c122
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs.h34
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c601
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.h15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c56
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h65
-rw-r--r--drivers/gpu/drm/i915/gt/intel_hwconfig.h21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c39
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.h27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c390
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c126
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps_types.h15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c54
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.h50
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c30
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c21
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c86
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c10
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c259
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h14
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h15
-rw-r--r--drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h218
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c48
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h19
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c185
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c1657
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h33
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h92
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c164
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c125
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.h7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c56
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c645
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c30
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c12
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c37
-rw-r--r--drivers/gpu/drm/i915/i915_drm_client.c158
-rw-r--r--drivers/gpu/drm/i915/i915_drm_client.h68
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h25
-rw-r--r--drivers/gpu/drm/i915/i915_file_private.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c89
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c297
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h37
-rw-r--r--drivers/gpu/drm/i915/i915_params.c3
-rw-r--r--drivers/gpu/drm/i915/i915_params.h1
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c4
-rw-r--r--drivers/gpu/drm/i915/i915_query.c94
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h24
-rw-r--r--drivers/gpu/drm/i915/i915_reg_defs.h2
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c310
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.h3
-rw-r--r--drivers/gpu/drm/i915/i915_ttm_buddy_manager.c4
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c108
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h14
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.c2
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.h6
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h8
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h2
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c2
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h8
-rw-r--r--drivers/gpu/drm/i915/intel_region_ttm.c7
-rw-r--r--drivers/gpu/drm/i915/intel_region_ttm.h1
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c86
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h7
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c18
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c13
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c80
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c34
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c3
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c6
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c20
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c1
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c13
-rw-r--r--drivers/gpu/drm/radeon/Makefile8
-rw-r--r--drivers/gpu/drm/radeon/cayman_blit_shaders.c320
-rw-r--r--drivers/gpu/drm/radeon/cayman_blit_shaders.h294
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_shaders.c303
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_shaders.h278
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.c719
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_sync.c2
-rw-r--r--drivers/gpu/drm/radeon/si_blit_shaders.c253
-rw-r--r--drivers/gpu/drm/radeon/si_blit_shaders.h223
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.c3
-rw-r--r--drivers/gpu/drm/vc4/Kconfig3
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c43
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c7
-rw-r--r--drivers/i2c/busses/i2c-imx.c33
-rw-r--r--drivers/i2c/busses/i2c-ismt.c4
-rw-r--r--drivers/i2c/busses/i2c-pasemi-core.c6
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c6
-rw-r--r--drivers/i2c/i2c-dev.c17
-rw-r--r--drivers/idle/intel_idle.c27
-rw-r--r--drivers/iio/adc/ad7280a.c12
-rw-r--r--drivers/iio/chemical/scd4x.c5
-rw-r--r--drivers/iio/dac/ad3552r.c6
-rw-r--r--drivers/iio/dac/ad5446.c2
-rw-r--r--drivers/iio/dac/ad5592r-base.c2
-rw-r--r--drivers/iio/dac/ltc2688.c2
-rw-r--r--drivers/iio/dac/ti-dac5571.c28
-rw-r--r--drivers/iio/filter/Kconfig1
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c20
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c15
-rw-r--r--drivers/iio/magnetometer/ak8975.c1
-rw-r--r--drivers/iio/proximity/sx9324.c32
-rw-r--r--drivers/iio/proximity/sx_common.c1
-rw-r--r--drivers/input/keyboard/cypress-sf.c14
-rw-r--r--drivers/input/keyboard/omap4-keypad.c2
-rw-r--r--drivers/interconnect/qcom/sc7180.c21
-rw-r--r--drivers/interconnect/qcom/sdx55.c21
-rw-r--r--drivers/md/bcache/journal.c2
-rw-r--r--drivers/md/bcache/request.c2
-rw-r--r--drivers/md/dm-integrity.c7
-rw-r--r--drivers/md/dm-ps-historical-service-time.c11
-rw-r--r--drivers/md/dm-zone.c49
-rw-r--r--drivers/md/dm.c18
-rw-r--r--drivers/media/platform/nxp/Kconfig1
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c2
-rw-r--r--drivers/media/tuners/si2157.c22
-rw-r--r--drivers/memory/atmel-ebi.c23
-rw-r--r--drivers/memory/fsl_ifc.c3
-rw-r--r--drivers/memory/renesas-rpc-if.c70
-rw-r--r--drivers/misc/eeprom/at25.c19
-rw-r--r--drivers/misc/mei/Kconfig14
-rw-r--r--drivers/misc/mei/Makefile3
-rw-r--r--drivers/misc/mei/bus-fixup.c25
-rw-r--r--drivers/misc/mei/gsc-me.c259
-rw-r--r--drivers/misc/mei/hw-me.c29
-rw-r--r--drivers/misc/mei/hw-me.h2
-rw-r--r--drivers/mtd/nand/raw/mtk_ecc.c12
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c24
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c14
-rw-r--r--drivers/net/bonding/bond_main.c13
-rw-r--r--drivers/net/dsa/lantiq_gswip.c3
-rw-r--r--drivers/net/dsa/mv88e6xxx/port_hidden.c5
-rw-r--r--drivers/net/dsa/ocelot/felix.c23
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c2
-rw-r--r--drivers/net/dsa/realtek/Kconfig30
-rw-r--r--drivers/net/dsa/realtek/realtek-mdio.c1
-rw-r--r--drivers/net/dsa/realtek/realtek-smi.c9
-rw-r--r--drivers/net/ethernet/Kconfig26
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c9
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c11
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c8
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c10
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c8
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c84
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c31
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c129
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h6
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c27
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c7
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c11
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mac.c6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c9
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c8
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c3
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c4
-rw-r--r--drivers/net/hippi/rrunner.c2
-rw-r--r--drivers/net/macvlan.c8
-rw-r--r--drivers/net/mdio/fwnode_mdio.c5
-rw-r--r--drivers/net/phy/marvell10g.c2
-rw-r--r--drivers/net/phy/microchip_t1.c8
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/veth.c2
-rw-r--r--drivers/net/virtio_net.c20
-rw-r--r--drivers/net/vxlan/vxlan_core.c4
-rw-r--r--drivers/net/wan/cosa.c2
-rw-r--r--drivers/net/wireguard/device.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c33
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c2
-rw-r--r--drivers/nvme/host/core.c27
-rw-r--r--drivers/nvme/host/nvme.h5
-rw-r--r--drivers/nvme/host/pci.c9
-rw-r--r--drivers/perf/arm_pmu.c10
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c20
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c3
-rw-r--r--drivers/phy/samsung/phy-exynos5250-sata.c21
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c2
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c2
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c1
-rw-r--r--drivers/phy/ti/phy-tusb1210.c12
-rw-r--r--drivers/pinctrl/intel/pinctrl-alderlake.c60
-rw-r--r--drivers/pinctrl/mediatek/Kconfig1
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c6
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c69
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6350.c16
-rw-r--r--drivers/pinctrl/samsung/Kconfig11
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c2
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c23
-rw-r--r--drivers/pinctrl/sunplus/sppctl_sp7021.c8
-rw-r--r--drivers/platform/x86/acerhdf.c21
-rw-r--r--drivers/platform/x86/amd-pmc.c14
-rw-r--r--drivers/platform/x86/asus-wmi.c15
-rw-r--r--drivers/platform/x86/barco-p50-gpio.c1
-rw-r--r--drivers/platform/x86/dell/dell-laptop.c13
-rw-r--r--drivers/platform/x86/gigabyte-wmi.c1
-rw-r--r--drivers/platform/x86/intel/pmc/core.h2
-rw-r--r--drivers/platform/x86/intel/sdsi.c44
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c3
-rw-r--r--drivers/platform/x86/samsung-laptop.c2
-rw-r--r--drivers/platform/x86/think-lmi.c44
-rw-r--r--drivers/platform/x86/think-lmi.h1
-rw-r--r--drivers/power/supply/power_supply_core.c6
-rw-r--r--drivers/power/supply/samsung-sdi-battery.c2
-rw-r--r--drivers/reset/reset-rzg2l-usbphy-ctrl.c4
-rw-r--r--drivers/reset/tegra/reset-bpmp.c9
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c6
-rw-r--r--drivers/scsi/libiscsi.c27
-rw-r--r--drivers/scsi/libiscsi_tcp.c2
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c33
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c69
-rw-r--r--drivers/scsi/scsi_debug.c197
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c239
-rw-r--r--drivers/scsi/sr_ioctl.c15
-rw-r--r--drivers/soc/imx/imx8m-blk-ctrl.c2
-rw-r--r--drivers/spi/atmel-quadspi.c3
-rw-r--r--drivers/spi/spi-cadence-quadspi.c19
-rw-r--r--drivers/spi/spi-intel-pci.c1
-rw-r--r--drivers/spi/spi-mtk-nor.c12
-rw-r--r--drivers/target/target_core_pscsi.c10
-rw-r--r--drivers/tee/optee/ffa_abi.c1
-rw-r--r--drivers/thermal/Kconfig6
-rw-r--r--drivers/thermal/gov_user_space.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c4
-rw-r--r--drivers/thermal/thermal_sysfs.c3
-rw-r--r--drivers/tty/n_gsm.c477
-rw-r--r--drivers/tty/serial/8250/8250_pci.c8
-rw-r--r--drivers/tty/serial/8250/8250_port.c6
-rw-r--r--drivers/tty/serial/amba-pl011.c9
-rw-r--r--drivers/tty/serial/imx.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c6
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c7
-rw-r--r--drivers/usb/core/devio.c14
-rw-r--r--drivers/usb/core/quirks.c6
-rw-r--r--drivers/usb/dwc3/core.c34
-rw-r--r--drivers/usb/dwc3/drd.c11
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c8
-rw-r--r--drivers/usb/dwc3/gadget.c31
-rw-r--r--drivers/usb/gadget/configfs.c2
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c23
-rw-r--r--drivers/usb/host/ehci-pci.c4
-rw-r--r--drivers/usb/host/ehci.h1
-rw-r--r--drivers/usb/host/xhci-hub.c2
-rw-r--r--drivers/usb/host/xhci-pci.c4
-rw-r--r--drivers/usb/host/xhci-ring.c1
-rw-r--r--drivers/usb/host/xhci-tegra.c4
-rw-r--r--drivers/usb/host/xhci.c11
-rw-r--r--drivers/usb/misc/qcom_eud.c10
-rw-r--r--drivers/usb/misc/uss720.c3
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c6
-rw-r--r--drivers/usb/phy/phy-generic.c7
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/option.c12
-rw-r--r--drivers/usb/serial/whiteheat.c5
-rw-r--r--drivers/usb/typec/Kconfig1
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c24
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c124
-rw-r--r--drivers/video/fbdev/arkfb.c3
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c1
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c1
-rw-r--r--drivers/video/fbdev/aty/radeon_pm.c1
-rw-r--r--drivers/video/fbdev/aty/radeonfb.h2
-rw-r--r--drivers/video/fbdev/clps711x-fb.c3
-rw-r--r--drivers/video/fbdev/controlfb.c3
-rw-r--r--drivers/video/fbdev/i740fb.c5
-rw-r--r--drivers/video/fbdev/imxfb.c2
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.h1
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c2
-rw-r--r--drivers/video/fbdev/mmp/core.c11
-rw-r--r--drivers/video/fbdev/neofb.c2
-rw-r--r--drivers/video/fbdev/omap/hwa742.c6
-rw-r--r--drivers/video/fbdev/omap/lcdc.c6
-rw-r--r--drivers/video/fbdev/omap/sossi.c5
-rw-r--r--drivers/video/fbdev/platinumfb.c2
-rw-r--r--drivers/video/fbdev/pm2fb.c8
-rw-r--r--drivers/video/fbdev/pxafb.c4
-rw-r--r--drivers/video/fbdev/s3fb.c3
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c3
-rw-r--r--drivers/video/fbdev/sis/sis_main.c2
-rw-r--r--drivers/video/fbdev/tridentfb.c3
-rw-r--r--drivers/video/fbdev/udlfb.c14
-rw-r--r--drivers/video/fbdev/valkyriefb.c3
-rw-r--r--drivers/video/fbdev/vt8623fb.c3
-rw-r--r--drivers/video/of_display_timing.c2
-rw-r--r--drivers/xen/balloon.c54
-rw-r--r--drivers/xen/gntalloc.c4
-rw-r--r--drivers/xen/unpopulated-alloc.c33
-rw-r--r--fs/afs/write.c3
-rw-r--r--fs/binfmt_elf.c4
-rw-r--r--fs/btrfs/block-group.c40
-rw-r--r--fs/btrfs/block-group.h4
-rw-r--r--fs/btrfs/compression.c8
-rw-r--r--fs/btrfs/ctree.h1
-rw-r--r--fs/btrfs/dev-replace.c7
-rw-r--r--fs/btrfs/disk-io.c6
-rw-r--r--fs/btrfs/extent-tree.c2
-rw-r--r--fs/btrfs/extent_io.c44
-rw-r--r--fs/btrfs/inode.c27
-rw-r--r--fs/btrfs/ioctl.c2
-rw-r--r--fs/btrfs/scrub.c26
-rw-r--r--fs/btrfs/tree-log.c1
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/btrfs/volumes.h3
-rw-r--r--fs/btrfs/zoned.h4
-rw-r--r--fs/cachefiles/namei.c33
-rw-r--r--fs/cachefiles/xattr.c2
-rw-r--r--fs/ceph/caps.c7
-rw-r--r--fs/ceph/mds_client.c6
-rw-r--r--fs/cifs/cifsfs.c30
-rw-r--r--fs/cifs/connect.c13
-rw-r--r--fs/cifs/dfs_cache.c19
-rw-r--r--fs/cifs/link.c3
-rw-r--r--fs/cifs/smb2ops.c17
-rw-r--r--fs/cifs/trace.h7
-rw-r--r--fs/cifs/transport.c7
-rw-r--r--fs/erofs/zdata.c12
-rw-r--r--fs/erofs/zdata.h2
-rw-r--r--fs/ext4/ext4.h7
-rw-r--r--fs/ext4/extents.c32
-rw-r--r--fs/ext4/inode.c18
-rw-r--r--fs/ext4/ioctl.c16
-rw-r--r--fs/ext4/namei.c4
-rw-r--r--fs/ext4/page-io.c4
-rw-r--r--fs/ext4/super.c40
-rw-r--r--fs/f2fs/checkpoint.c6
-rw-r--r--fs/f2fs/data.c33
-rw-r--r--fs/f2fs/f2fs.h9
-rw-r--r--fs/f2fs/inode.c3
-rw-r--r--fs/f2fs/segment.c95
-rw-r--r--fs/f2fs/super.c32
-rw-r--r--fs/fscache/Kconfig3
-rw-r--r--fs/fscache/cache.c2
-rw-r--r--fs/fscache/cookie.c4
-rw-r--r--fs/fscache/internal.h4
-rw-r--r--fs/fscache/io.c5
-rw-r--r--fs/gfs2/file.c6
-rw-r--r--fs/hugetlbfs/inode.c9
-rw-r--r--fs/io-wq.h1
-rw-r--r--fs/io_uring.c114
-rw-r--r--fs/jbd2/commit.c4
-rw-r--r--fs/kernfs/dir.c7
-rw-r--r--fs/ksmbd/misc.c40
-rw-r--r--fs/ksmbd/misc.h3
-rw-r--r--fs/ksmbd/oplock.c30
-rw-r--r--fs/ksmbd/oplock.h2
-rw-r--r--fs/ksmbd/smb2pdu.c34
-rw-r--r--fs/ksmbd/vfs.c6
-rw-r--r--fs/ksmbd/vfs_cache.c2
-rw-r--r--fs/ksmbd/vfs_cache.h1
-rw-r--r--fs/namei.c22
-rw-r--r--fs/namespace.c14
-rw-r--r--fs/nfsd/filecache.c25
-rw-r--r--fs/nfsd/nfs2acl.c24
-rw-r--r--fs/pipe.c9
-rw-r--r--fs/posix_acl.c10
-rw-r--r--fs/stat.c19
-rw-r--r--fs/xattr.c6
-rw-r--r--fs/xfs/xfs_buf.c6
-rw-r--r--fs/xfs/xfs_buf.h42
-rw-r--r--fs/xfs/xfs_inode.c24
-rw-r--r--fs/xfs/xfs_trans.h2
-rw-r--r--fs/zonefs/super.c46
-rw-r--r--include/asm-generic/bug.h11
-rw-r--r--include/asm-generic/unaligned.h2
-rw-r--r--include/drm/display/drm_dp.h2
-rw-r--r--include/dt-bindings/clock/microchip,mpfs-clock.h5
-rw-r--r--include/linux/cpu.h2
-rw-r--r--include/linux/dma-buf-map.h266
-rw-r--r--include/linux/fscache.h4
-rw-r--r--include/linux/gpio/consumer.h8
-rw-r--r--include/linux/hugetlb.h6
-rw-r--r--include/linux/kernel.h11
-rw-r--r--include/linux/kfence.h24
-rw-r--r--include/linux/kvm_host.h26
-rw-r--r--include/linux/mei_aux.h19
-rw-r--r--include/linux/memcontrol.h5
-rw-r--r--include/linux/mm.h8
-rw-r--r--include/linux/mtd/mtd.h6
-rw-r--r--include/linux/netdevice.h21
-rw-r--r--include/linux/posix_acl_xattr.h4
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/sched/mm.h8
-rw-r--r--include/linux/sunrpc/svc.h1
-rw-r--r--include/linux/t10-pi.h9
-rw-r--r--include/linux/timex.h2
-rw-r--r--include/linux/usb/pd_bdo.h2
-rw-r--r--include/linux/vfio_pci_core.h2
-rw-r--r--include/linux/vmalloc.h4
-rw-r--r--include/memory/renesas-rpc-if.h1
-rw-r--r--include/net/bluetooth/hci.h1
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/net/esp.h2
-rw-r--r--include/net/flow_dissector.h2
-rw-r--r--include/net/ip6_tunnel.h2
-rw-r--r--include/net/ip_tunnels.h13
-rw-r--r--include/net/netns/ipv6.h4
-rw-r--r--include/net/tcp.h8
-rw-r--r--include/net/xsk_buff_pool.h1
-rw-r--r--include/scsi/libiscsi.h9
-rw-r--r--include/scsi/scsi_transport_iscsi.h4
-rw-r--r--include/sound/core.h1
-rw-r--r--include/sound/memalloc.h5
-rw-r--r--include/sound/soc-component.h4
-rw-r--r--include/trace/events/sunrpc.h7
-rw-r--r--include/uapi/drm/i915_drm.h291
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/fb.h2
-rw-r--r--include/uapi/linux/input-event-codes.h21
-rw-r--r--include/uapi/linux/io_uring.h1
-rw-r--r--include/uapi/linux/kvm.h10
-rw-r--r--include/uapi/linux/stddef.h4
-rw-r--r--kernel/cpu.c36
-rw-r--r--kernel/dma/direct.h3
-rw-r--r--kernel/events/core.c2
-rw-r--r--kernel/events/internal.h5
-rw-r--r--kernel/events/ring_buffer.c5
-rw-r--r--kernel/irq/affinity.c5
-rw-r--r--kernel/irq_work.c2
-rw-r--r--kernel/kcov.c7
-rw-r--r--kernel/kprobes.c2
-rw-r--r--kernel/sched/fair.c10
-rw-r--r--kernel/smp.c2
-rw-r--r--kernel/time/tick-sched.c6
-rw-r--r--kernel/time/timer.c11
-rw-r--r--lib/hexdump.c41
-rw-r--r--lib/strncpy_from_user.c2
-rw-r--r--lib/strnlen_user.c2
-rw-r--r--lib/xarray.c2
-rw-r--r--mm/compaction.c10
-rw-r--r--mm/filemap.c6
-rw-r--r--mm/hugetlb.c27
-rw-r--r--mm/kasan/hw_tags.c5
-rw-r--r--mm/kasan/kasan.h10
-rw-r--r--mm/kasan/quarantine.c7
-rw-r--r--mm/kfence/core.c21
-rw-r--r--mm/kfence/kfence.h21
-rw-r--r--mm/kfence/report.c47
-rw-r--r--mm/kmemleak.c8
-rw-r--r--mm/memcontrol.c12
-rw-r--r--mm/memory-failure.c158
-rw-r--r--mm/mmap.c8
-rw-r--r--mm/mmu_notifier.c14
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/oom_kill.c54
-rw-r--r--mm/page_alloc.c4
-rw-r--r--mm/page_io.c54
-rw-r--r--mm/secretmem.c17
-rw-r--r--mm/shmem.c31
-rw-r--r--mm/slab.c2
-rw-r--r--mm/slab.h2
-rw-r--r--mm/slab_common.c9
-rw-r--r--mm/slob.c2
-rw-r--r--mm/slub.c2
-rw-r--r--mm/userfaultfd.c15
-rw-r--r--mm/util.c11
-rw-r--r--mm/vmalloc.c64
-rw-r--r--mm/workingset.c2
-rw-r--r--net/bluetooth/hci_conn.c32
-rw-r--r--net/bluetooth/hci_event.c80
-rw-r--r--net/bluetooth/hci_sync.c11
-rw-r--r--net/bpf/test_run.c5
-rw-r--r--net/bridge/br_switchdev.c2
-rw-r--r--net/can/isotp.c10
-rw-r--r--net/ceph/osd_client.c6
-rw-r--r--net/core/dev.c14
-rw-r--r--net/core/flow_dissector.c3
-rw-r--r--net/core/lwt_bpf.c7
-rw-r--r--net/core/rtnetlink.c3
-rw-r--r--net/dsa/dsa2.c23
-rw-r--r--net/dsa/port.c2
-rw-r--r--net/dsa/slave.c2
-rw-r--r--net/dsa/tag_hellcreek.c8
-rw-r--r--net/ipv4/esp4.c5
-rw-r--r--net/ipv4/ip_gre.c16
-rw-r--r--net/ipv4/ip_tunnel.c9
-rw-r--r--net/ipv4/netfilter/nf_flow_table_ipv4.c0
-rw-r--r--net/ipv4/syncookies.c8
-rw-r--r--net/ipv4/tcp_input.c15
-rw-r--r--net/ipv4/tcp_minisocks.c2
-rw-r--r--net/ipv4/tcp_output.c1
-rw-r--r--net/ipv4/tcp_rate.c11
-rw-r--r--net/ipv6/esp6.c5
-rw-r--r--net/ipv6/ip6_gre.c28
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/netfilter.c10
-rw-r--r--net/ipv6/route.c11
-rw-r--r--net/ipv6/syncookies.c3
-rw-r--r--net/l3mdev/l3mdev.c2
-rw-r--r--net/mac80211/debugfs_sta.c2
-rw-r--r--net/mctp/device.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c21
-rw-r--r--net/netfilter/nf_conntrack_standalone.c2
-rw-r--r--net/netfilter/nf_tables_api.c2
-rw-r--r--net/netfilter/nft_set_rbtree.c6
-rw-r--r--net/netfilter/nft_socket.c59
-rw-r--r--net/netlink/af_netlink.c7
-rw-r--r--net/nfc/nci/core.c4
-rw-r--r--net/openvswitch/flow_netlink.c2
-rw-r--r--net/packet/af_packet.c13
-rw-r--r--net/rxrpc/net_ns.c2
-rw-r--r--net/sched/cls_api.c2
-rw-r--r--net/sched/cls_flower.c18
-rw-r--r--net/sched/cls_u32.c24
-rw-r--r--net/sched/sch_taprio.c3
-rw-r--r--net/sctp/sm_sideeffect.c4
-rw-r--r--net/sctp/sm_statefuns.c6
-rw-r--r--net/sctp/socket.c2
-rw-r--r--net/smc/af_smc.c155
-rw-r--r--net/smc/smc.h29
-rw-r--r--net/smc/smc_clc.c6
-rw-r--r--net/smc/smc_close.c5
-rw-r--r--net/smc/smc_pnet.c5
-rw-r--r--net/sunrpc/svc_xprt.c3
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c2
-rw-r--r--net/tls/tls_device.c12
-rw-r--r--net/wireless/nl80211.c3
-rw-r--r--net/wireless/scan.c2
-rw-r--r--net/xdp/xsk.c15
-rw-r--r--net/xdp/xsk_buff_pool.c16
-rw-r--r--net/xfrm/xfrm_policy.c4
-rw-r--r--scripts/Makefile.build2
-rw-r--r--scripts/gcc-plugins/latent_entropy_plugin.c44
-rwxr-xr-xscripts/link-vmlinux.sh2
-rw-r--r--sound/core/init.c28
-rw-r--r--sound/core/memalloc.c111
-rw-r--r--sound/core/pcm_misc.c2
-rw-r--r--sound/drivers/mtpav.c4
-rw-r--r--sound/hda/hdac_i915.c28
-rw-r--r--sound/hda/intel-dsp-config.c27
-rw-r--r--sound/isa/galaxy/galaxy.c7
-rw-r--r--sound/isa/sc6000.c7
-rw-r--r--sound/oss/dmasound/dmasound.h6
-rw-r--r--sound/oss/dmasound/dmasound_core.c26
-rw-r--r--sound/pci/ad1889.c10
-rw-r--r--sound/pci/ali5451/ali5451.c10
-rw-r--r--sound/pci/als300.c8
-rw-r--r--sound/pci/als4000.c10
-rw-r--r--sound/pci/atiixp.c10
-rw-r--r--sound/pci/atiixp_modem.c10
-rw-r--r--sound/pci/au88x0/au88x0.c8
-rw-r--r--sound/pci/aw2/aw2-alsa.c8
-rw-r--r--sound/pci/azt3328.c8
-rw-r--r--sound/pci/bt87x.c10
-rw-r--r--sound/pci/ca0106/ca0106_main.c10
-rw-r--r--sound/pci/cmipci.c8
-rw-r--r--sound/pci/cs4281.c10
-rw-r--r--sound/pci/cs5535audio/cs5535audio.c10
-rw-r--r--sound/pci/echoaudio/echoaudio.c9
-rw-r--r--sound/pci/emu10k1/emu10k1x.c10
-rw-r--r--sound/pci/ens1370.c10
-rw-r--r--sound/pci/es1938.c10
-rw-r--r--sound/pci/es1968.c10
-rw-r--r--sound/pci/fm801.c10
-rw-r--r--sound/pci/hda/patch_hdmi.c7
-rw-r--r--sound/pci/hda/patch_realtek.c14
-rw-r--r--sound/pci/ice1712/ice1724.c10
-rw-r--r--sound/pci/intel8x0.c10
-rw-r--r--sound/pci/intel8x0m.c10
-rw-r--r--sound/pci/korg1212/korg1212.c8
-rw-r--r--sound/pci/lola/lola.c10
-rw-r--r--sound/pci/lx6464es/lx6464es.c8
-rw-r--r--sound/pci/maestro3.c8
-rw-r--r--sound/pci/nm256/nm256.c2
-rw-r--r--sound/pci/oxygen/oxygen_lib.c12
-rw-r--r--sound/pci/riptide/riptide.c8
-rw-r--r--sound/pci/rme32.c8
-rw-r--r--sound/pci/rme96.c10
-rw-r--r--sound/pci/rme9652/hdsp.c8
-rw-r--r--sound/pci/rme9652/hdspm.c8
-rw-r--r--sound/pci/rme9652/rme9652.c8
-rw-r--r--sound/pci/sis7019.c14
-rw-r--r--sound/pci/sonicvibes.c10
-rw-r--r--sound/pci/via82xx.c10
-rw-r--r--sound/pci/via82xx_modem.c10
-rw-r--r--sound/soc/atmel/sam9g20_wm8731.c61
-rw-r--r--sound/soc/codecs/cs35l41-lib.c10
-rw-r--r--sound/soc/codecs/lpass-rx-macro.c14
-rw-r--r--sound/soc/codecs/lpass-tx-macro.c14
-rw-r--r--sound/soc/codecs/lpass-va-macro.c8
-rw-r--r--sound/soc/codecs/msm8916-wcd-digital.c9
-rw-r--r--sound/soc/codecs/rk817_codec.c2
-rw-r--r--sound/soc/codecs/rt5682.c20
-rw-r--r--sound/soc/codecs/rt5682s.c11
-rw-r--r--sound/soc/codecs/rt711.c7
-rw-r--r--sound/soc/codecs/wcd934x.c26
-rw-r--r--sound/soc/codecs/wm8731.c19
-rw-r--r--sound/soc/fsl/fsl_sai.c2
-rw-r--r--sound/soc/generic/simple-card-utils.c6
-rw-r--r--sound/soc/intel/boards/sof_es8336.c117
-rw-r--r--sound/soc/intel/boards/sof_rt5682.c13
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-tgl-match.c4
-rw-r--r--sound/soc/meson/aiu-acodec-ctrl.c3
-rw-r--r--sound/soc/meson/aiu-codec-ctrl.c3
-rw-r--r--sound/soc/meson/aiu.c3
-rw-r--r--sound/soc/soc-core.c5
-rw-r--r--sound/soc/soc-dapm.c6
-rw-r--r--sound/soc/soc-pcm.c2
-rw-r--r--sound/soc/soc-topology.c4
-rw-r--r--sound/soc/sof/sof-pci-dev.c9
-rw-r--r--sound/soc/sof/topology.c50
-rw-r--r--sound/usb/midi.c1
-rw-r--r--sound/usb/mixer_maps.c4
-rw-r--r--sound/usb/pcm.c16
-rw-r--r--sound/usb/usbaudio.h2
-rw-r--r--sound/x86/intel_hdmi_audio.c7
-rw-r--r--tools/arch/x86/include/asm/msr-index.h4
-rw-r--r--tools/include/linux/slab.h8
-rw-r--r--tools/lib/perf/evlist.c3
-rw-r--r--tools/objtool/check.c94
-rw-r--r--tools/objtool/elf.c189
-rw-r--r--tools/objtool/include/objtool/elf.h4
-rw-r--r--tools/objtool/include/objtool/objtool.h2
-rw-r--r--tools/objtool/objtool.c1
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c10
-rw-r--r--tools/perf/arch/arm64/util/machine.c21
-rw-r--r--tools/perf/arch/powerpc/util/Build1
-rw-r--r--tools/perf/arch/powerpc/util/machine.c25
-rw-r--r--tools/perf/arch/s390/util/machine.c16
-rw-r--r--tools/perf/bench/numa.c136
-rw-r--r--tools/perf/builtin-record.c22
-rw-r--r--tools/perf/builtin-report.c14
-rw-r--r--tools/perf/builtin-script.c2
-rw-r--r--tools/perf/tests/attr/README1
-rw-r--r--tools/perf/tests/attr/test-record-spe-physical-address12
-rw-r--r--tools/perf/tests/perf-time-to-tsc.c4
-rw-r--r--tools/perf/util/arm-spe.c5
-rw-r--r--tools/perf/util/c++/clang.cpp4
-rw-r--r--tools/perf/util/header.c51
-rw-r--r--tools/perf/util/header.h1
-rw-r--r--tools/perf/util/parse-events.c5
-rw-r--r--tools/perf/util/session.c2
-rw-r--r--tools/perf/util/stat.c3
-rw-r--r--tools/perf/util/symbol-elf.c2
-rw-r--r--tools/perf/util/symbol.c37
-rw-r--r--tools/perf/util/symbol.h3
-rw-r--r--tools/power/x86/intel-speed-select/Makefile2
-rw-r--r--tools/testing/radix-tree/linux.c3
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh17
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh17
-rw-r--r--tools/testing/selftests/kvm/.gitignore2
-rw-r--r--tools/testing/selftests/kvm/Makefile1
-rw-r--r--tools/testing/selftests/kvm/aarch64/arch_timer.c15
-rw-r--r--tools/testing/selftests/kvm/aarch64/get-reg-list.c14
-rw-r--r--tools/testing/selftests/kvm/aarch64/vcpu_width_config.c122
-rw-r--r--tools/testing/selftests/kvm/dirty_log_perf_test.c34
-rw-r--r--tools/testing/selftests/kvm/include/riscv/processor.h4
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h17
-rw-r--r--tools/testing/selftests/kvm/kvm_page_table_test.c2
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/processor.c2
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c202
-rw-r--r--tools/testing/selftests/kvm/x86_64/amx_test.c1
-rw-r--r--tools/testing/selftests/kvm/x86_64/emulator_error_test.c1
-rw-r--r--tools/testing/selftests/kvm/x86_64/smm_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c1
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c1
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c1
-rw-r--r--tools/testing/selftests/mqueue/mq_perf_tests.c25
-rw-r--r--tools/testing/selftests/vm/mremap_test.c85
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests.sh11
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/i686.config1
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/x86_64.config1
-rw-r--r--virt/kvm/dirty_ring.c2
-rw-r--r--virt/kvm/kvm_main.c44
-rw-r--r--virt/kvm/kvm_mm.h2
1357 files changed, 21604 insertions, 12447 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-firmware-attributes b/Documentation/ABI/testing/sysfs-class-firmware-attributes
index 05820365f1ec..4cdba3477176 100644
--- a/Documentation/ABI/testing/sysfs-class-firmware-attributes
+++ b/Documentation/ABI/testing/sysfs-class-firmware-attributes
@@ -116,7 +116,7 @@ Description:
<value>[ForceIf:<attribute>=<value>]
<value>[ForceIfNot:<attribute>=<value>]
- For example:
+ For example::
LegacyOrom/dell_value_modifier has value:
Disabled[ForceIf:SecureBoot=Enabled]
@@ -212,7 +212,7 @@ Description:
the next boot.
Lenovo specific class extensions
- ------------------------------
+ --------------------------------
On Lenovo systems the following additional settings are available:
@@ -246,9 +246,7 @@ Description:
that is being referenced (e.g hdd0, hdd1 etc)
This attribute defaults to device 0.
- certificate:
- signature:
- save_signature:
+ certificate, signature, save_signature:
These attributes are used for certificate based authentication. This is
used in conjunction with a signing server as an alternative to password
based authentication.
@@ -257,22 +255,27 @@ Description:
The attributes can be displayed to check the stored value.
Some usage examples:
- Installing a certificate to enable feature:
- echo <supervisor password > authentication/Admin/current_password
- echo <signed certificate> > authentication/Admin/certificate
- Updating the installed certificate:
- echo <signature> > authentication/Admin/signature
- echo <signed certificate> > authentication/Admin/certificate
+ Installing a certificate to enable feature::
+
+ echo "supervisor password" > authentication/Admin/current_password
+ echo "signed certificate" > authentication/Admin/certificate
+
+ Updating the installed certificate::
+
+ echo "signature" > authentication/Admin/signature
+ echo "signed certificate" > authentication/Admin/certificate
- Removing the installed certificate:
- echo <signature> > authentication/Admin/signature
- echo '' > authentication/Admin/certificate
+ Removing the installed certificate::
- Changing a BIOS setting:
- echo <signature> > authentication/Admin/signature
- echo <save signature> > authentication/Admin/save_signature
- echo Enable > attribute/PasswordBeep/current_value
+ echo "signature" > authentication/Admin/signature
+ echo "" > authentication/Admin/certificate
+
+ Changing a BIOS setting::
+
+ echo "signature" > authentication/Admin/signature
+ echo "save signature" > authentication/Admin/save_signature
+ echo Enable > attribute/PasswordBeep/current_value
You cannot enable certificate authentication if a supervisor password
has not been set.
@@ -288,9 +291,10 @@ Description:
certificate_to_password:
Write only attribute used to switch from certificate based authentication
back to password based.
- Usage:
- echo <signature> > authentication/Admin/signature
- echo <password> > authentication/Admin/certificate_to_password
+ Usage::
+
+ echo "signature" > authentication/Admin/signature
+ echo "password" > authentication/Admin/certificate_to_password
What: /sys/class/firmware-attributes/*/attributes/pending_reboot
@@ -345,7 +349,7 @@ Description:
# echo "factory" > /sys/class/firmware-attributes/*/device/attributes/reset_bios
# cat /sys/class/firmware-attributes/*/device/attributes/reset_bios
- # builtinsafe lastknowngood [factory] custom
+ builtinsafe lastknowngood [factory] custom
Note that any changes to this attribute requires a reboot
for changes to take effect.
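
Taken together, the flow documented above can be exercised from a root shell; a minimal sketch, assuming a Lenovo machine whose firmware-attributes class device is named thinklmi (the name varies by platform) and using placeholder variables for the signed blobs::

    # Install a signed certificate to enable certificate authentication
    # (a supervisor password must already be set)
    cd /sys/class/firmware-attributes/thinklmi/authentication/Admin
    echo "$SUPERVISOR_PASSWORD" > current_password
    echo "$SIGNED_CERT" > certificate

    # Change a BIOS setting: each write needs a fresh signature and
    # save_signature obtained from the signing server
    echo "$SIGNATURE" > signature
    echo "$SAVE_SIGNATURE" > save_signature
    echo Enable > /sys/class/firmware-attributes/thinklmi/attributes/PasswordBeep/current_value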
diff --git a/Documentation/ABI/testing/sysfs-driver-intel_sdsi b/Documentation/ABI/testing/sysfs-driver-intel_sdsi
index ab122125ff9a..96b92c105ec4 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel_sdsi
+++ b/Documentation/ABI/testing/sysfs-driver-intel_sdsi
@@ -13,17 +13,19 @@ Description:
Should the operation fail, one of the following error codes
may be returned:
+ ========== =====
Error Code Cause
- ---------- -----
- EIO General mailbox failure. Log may indicate cause.
- EBUSY Mailbox is owned by another agent.
- EPERM SDSI capability is not enabled in hardware.
- EPROTO Failure in mailbox protocol detected by driver.
+ ========== =====
+ EIO General mailbox failure. Log may indicate cause.
+ EBUSY Mailbox is owned by another agent.
+ EPERM SDSI capability is not enabled in hardware.
+ EPROTO Failure in mailbox protocol detected by driver.
See log for details.
- EOVERFLOW For provision commands, the size of the data
+ EOVERFLOW For provision commands, the size of the data
exceeds what may be written.
- ESPIPE Seeking is not allowed.
- ETIMEDOUT Failure to complete mailbox transaction in time.
+ ESPIPE Seeking is not allowed.
+ ETIMEDOUT Failure to complete mailbox transaction in time.
+ ========== =====
What: /sys/bus/auxiliary/devices/intel_vsec.sdsi.X/guid
Date: Feb 2022
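
As a rough illustration of how the error table above surfaces in practice — the instance name intel_vsec.sdsi.0 is a placeholder, and provision_akc is an assumed writable attribute used only for this sketch, not something defined by the entries above::

    # guid is documented in the What: entry above
    cat /sys/bus/auxiliary/devices/intel_vsec.sdsi.0/guid

    # a failed provisioning write returns one of the listed errno values;
    # the kernel log usually carries the detailed mailbox failure
    dd if=license.bin of=/sys/bus/auxiliary/devices/intel_vsec.sdsi.0/provision_akc \
        || dmesg | tail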
diff --git a/Documentation/ABI/testing/sysfs-fs-erofs b/Documentation/ABI/testing/sysfs-fs-erofs
index 05482374a741..bb4681a01811 100644
--- a/Documentation/ABI/testing/sysfs-fs-erofs
+++ b/Documentation/ABI/testing/sysfs-fs-erofs
@@ -9,8 +9,9 @@ Description: Shows all enabled kernel features.
What: /sys/fs/erofs/<disk>/sync_decompress
Date: November 2021
Contact: "Huang Jianan" <huangjianan@oppo.com>
-Description: Control strategy of sync decompression
+Description: Control strategy of sync decompression:
+
- 0 (default, auto): enable for readpage, and enable for
- readahead on atomic contexts only,
+ readahead on atomic contexts only.
- 1 (force on): enable for readpage and readahead.
- 2 (force off): disable for all situations.
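
For instance, the strategy can be inspected and switched at runtime (the <disk> component, sdb1 here, is a placeholder)::

    cat /sys/fs/erofs/sdb1/sync_decompress        # 0 (auto) by default
    echo 1 > /sys/fs/erofs/sdb1/sync_decompress   # force on
    echo 2 > /sys/fs/erofs/sdb1/sync_decompress   # force off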
diff --git a/Documentation/arm64/memory-tagging-extension.rst b/Documentation/arm64/memory-tagging-extension.rst
index dd27f78d7608..dbae47bba25e 100644
--- a/Documentation/arm64/memory-tagging-extension.rst
+++ b/Documentation/arm64/memory-tagging-extension.rst
@@ -228,10 +228,10 @@ Core dump support
-----------------
The allocation tags for user memory mapped with ``PROT_MTE`` are dumped
-in the core file as additional ``PT_ARM_MEMTAG_MTE`` segments. The
+in the core file as additional ``PT_AARCH64_MEMTAG_MTE`` segments. The
program header for such segment is defined as:
-:``p_type``: ``PT_ARM_MEMTAG_MTE``
+:``p_type``: ``PT_AARCH64_MEMTAG_MTE``
:``p_flags``: 0
:``p_offset``: segment file offset
:``p_vaddr``: segment virtual address, same as the corresponding
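
A quick way to confirm the renamed segment type in a core dump is to list its program headers; a sketch, assuming a binutils new enough to decode the type (older versions print the raw processor-specific value LOPROC+0x2 instead)::

    # one extra program header appears per PROT_MTE mapping
    readelf --segments core | grep -i memtag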
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
index 0afec83cc723..564ae6aaccf7 100644
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
@@ -14,7 +14,6 @@ properties:
compatible:
enum:
- nvidia,tegra20-pmc
- - nvidia,tegra20-pmc
- nvidia,tegra30-pmc
- nvidia,tegra114-pmc
- nvidia,tegra124-pmc
diff --git a/Documentation/devicetree/bindings/bus/ti-sysc.yaml b/Documentation/devicetree/bindings/bus/ti-sysc.yaml
index bd40213302da..fced4082b047 100644
--- a/Documentation/devicetree/bindings/bus/ti-sysc.yaml
+++ b/Documentation/devicetree/bindings/bus/ti-sysc.yaml
@@ -35,7 +35,6 @@ properties:
- items:
- enum:
- ti,sysc-omap2
- - ti,sysc-omap2
- ti,sysc-omap4
- ti,sysc-omap4-simple
- ti,sysc-omap2-timer
diff --git a/Documentation/devicetree/bindings/clock/microchip,mpfs.yaml b/Documentation/devicetree/bindings/clock/microchip,mpfs.yaml
index 0c15afa2214c..016a4f378b9b 100644
--- a/Documentation/devicetree/bindings/clock/microchip,mpfs.yaml
+++ b/Documentation/devicetree/bindings/clock/microchip,mpfs.yaml
@@ -22,7 +22,16 @@ properties:
const: microchip,mpfs-clkcfg
reg:
- maxItems: 1
+ items:
+ - description: |
+ clock config registers:
+ These registers contain enable, reset & divider tables for the cpu,
+ axi, ahb and rtc/mtimer reference clocks as well as enable and reset
+ for the peripheral clocks.
+ - description: |
+ mss pll dri registers:
+ Block of registers responsible for dynamic reconfiguration of the mss
+ pll
clocks:
maxItems: 1
@@ -51,7 +60,7 @@ examples:
#size-cells = <2>;
clkcfg: clock-controller@20002000 {
compatible = "microchip,mpfs-clkcfg";
- reg = <0x0 0x20002000 0x0 0x1000>;
+ reg = <0x0 0x20002000 0x0 0x1000>, <0x0 0x3E001000 0x0 0x1000>;
clocks = <&ref>;
#clock-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos-audss-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos-audss-clock.yaml
index f14f1d39da36..d819dfaafff9 100644
--- a/Documentation/devicetree/bindings/clock/samsung,exynos-audss-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos-audss-clock.yaml
@@ -8,7 +8,7 @@ title: Samsung Exynos SoC Audio SubSystem clock controller
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos-clock.yaml
index 4e8062860986..0589a63e273a 100644
--- a/Documentation/devicetree/bindings/clock/samsung,exynos-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos-clock.yaml
@@ -8,7 +8,7 @@ title: Samsung Exynos SoC clock controller
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos-ext-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos-ext-clock.yaml
index 64d027dbe3b2..c98eff64f2b5 100644
--- a/Documentation/devicetree/bindings/clock/samsung,exynos-ext-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos-ext-clock.yaml
@@ -8,7 +8,7 @@ title: Samsung SoC external/osc/XXTI/XusbXTI clock
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos4412-isp-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos4412-isp-clock.yaml
index 1ed64add4355..b644bbd0df38 100644
--- a/Documentation/devicetree/bindings/clock/samsung,exynos4412-isp-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos4412-isp-clock.yaml
@@ -8,7 +8,7 @@ title: Samsung Exynos4412 SoC ISP clock controller
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos5260-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos5260-clock.yaml
index a3fac5c6809d..b05f83533e3d 100644
--- a/Documentation/devicetree/bindings/clock/samsung,exynos5260-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos5260-clock.yaml
@@ -8,7 +8,7 @@ title: Samsung Exynos5260 SoC clock controller
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos5410-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos5410-clock.yaml
index 032862e9f55b..b737c9d35a1c 100644
--- a/Documentation/devicetree/bindings/clock/samsung,exynos5410-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos5410-clock.yaml
@@ -8,7 +8,7 @@ title: Samsung Exynos5410 SoC clock controller
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos5433-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos5433-clock.yaml
index edd1b4ac4334..3f9326e09f79 100644
--- a/Documentation/devicetree/bindings/clock/samsung,exynos5433-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos5433-clock.yaml
@@ -8,7 +8,7 @@ title: Samsung Exynos5433 SoC clock controller
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos7-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos7-clock.yaml
index 599baf0b7231..c137c6744ef9 100644
--- a/Documentation/devicetree/bindings/clock/samsung,exynos7-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos7-clock.yaml
@@ -8,7 +8,7 @@ title: Samsung Exynos7 SoC clock controller
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos7885-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos7885-clock.yaml
index 7e5a9cac2fd2..5073e569a47f 100644
--- a/Documentation/devicetree/bindings/clock/samsung,exynos7885-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos7885-clock.yaml
@@ -9,7 +9,7 @@ title: Samsung Exynos7885 SoC clock controller
maintainers:
- Dávid Virág <virag.david003@gmail.com>
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos850-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos850-clock.yaml
index 80ba60838f2b..aa11815ad3a3 100644
--- a/Documentation/devicetree/bindings/clock/samsung,exynos850-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos850-clock.yaml
@@ -9,7 +9,7 @@ title: Samsung Exynos850 SoC clock controller
maintainers:
- Sam Protsenko <semen.protsenko@linaro.org>
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Documentation/devicetree/bindings/clock/samsung,s2mps11.yaml b/Documentation/devicetree/bindings/clock/samsung,s2mps11.yaml
index 1410c51e0e7d..9248bfc16d48 100644
--- a/Documentation/devicetree/bindings/clock/samsung,s2mps11.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,s2mps11.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S2M and S5M family clock generator block
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for S2M and S5M family of Power
diff --git a/Documentation/devicetree/bindings/clock/samsung,s5pv210-audss-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,s5pv210-audss-clock.yaml
index ae8f8fc93233..2659854ea1c0 100644
--- a/Documentation/devicetree/bindings/clock/samsung,s5pv210-audss-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,s5pv210-audss-clock.yaml
@@ -8,7 +8,7 @@ title: Samsung S5Pv210 SoC Audio SubSystem clock controller
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.yaml
index dcb29a2d1159..67a33665cf00 100644
--- a/Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.yaml
@@ -8,7 +8,7 @@ title: Samsung S5P6442/S5PC110/S5PV210 SoC clock controller
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Documentation/devicetree/bindings/devfreq/event/samsung,exynos-nocp.yaml b/Documentation/devicetree/bindings/devfreq/event/samsung,exynos-nocp.yaml
index d318fccf78f1..2bdd05af6079 100644
--- a/Documentation/devicetree/bindings/devfreq/event/samsung,exynos-nocp.yaml
+++ b/Documentation/devicetree/bindings/devfreq/event/samsung,exynos-nocp.yaml
@@ -8,7 +8,7 @@ title: Samsung Exynos NoC (Network on Chip) Probe
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
The Samsung Exynos542x SoC has a NoC (Network on Chip) Probe for NoC bus.
diff --git a/Documentation/devicetree/bindings/devfreq/event/samsung,exynos-ppmu.yaml b/Documentation/devicetree/bindings/devfreq/event/samsung,exynos-ppmu.yaml
index c9a8cb5fd555..e300df4b47f3 100644
--- a/Documentation/devicetree/bindings/devfreq/event/samsung,exynos-ppmu.yaml
+++ b/Documentation/devicetree/bindings/devfreq/event/samsung,exynos-ppmu.yaml
@@ -8,7 +8,7 @@ title: Samsung Exynos SoC PPMU (Platform Performance Monitoring Unit)
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
The Samsung Exynos SoC has PPMU (Platform Performance Monitoring Unit) for
diff --git a/Documentation/devicetree/bindings/display/msm/dpu-qcm2290.yaml b/Documentation/devicetree/bindings/display/msm/dpu-qcm2290.yaml
index d31483a78eab..6fb7e321f011 100644
--- a/Documentation/devicetree/bindings/display/msm/dpu-qcm2290.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dpu-qcm2290.yaml
@@ -160,7 +160,7 @@ examples:
mdss: mdss@5e00000 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "qcom,qcm2290-mdss", "qcom,mdss";
+ compatible = "qcom,qcm2290-mdss";
reg = <0x05e00000 0x1000>;
reg-names = "mdss";
power-domains = <&dispcc MDSS_GDSC>;
@@ -180,7 +180,7 @@ examples:
<&apps_smmu 0x421 0x0>;
ranges;
- mdss_mdp: mdp@5e01000 {
+ mdss_mdp: display-controller@5e01000 {
compatible = "qcom,qcm2290-dpu";
reg = <0x05e01000 0x8f000>,
<0x05eb0000 0x2008>;
diff --git a/Documentation/devicetree/bindings/display/panel/panel-timing.yaml b/Documentation/devicetree/bindings/display/panel/panel-timing.yaml
index 9bf592dc3033..7749de95ee40 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-timing.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-timing.yaml
@@ -71,78 +71,72 @@ properties:
hfront-porch:
description: Horizontal front porch panel timing
+ $ref: /schemas/types.yaml#/definitions/uint32-array
oneOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- maxItems: 1
+ - maxItems: 1
items:
description: typical number of pixels
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- minItems: 3
+ - minItems: 3
maxItems: 3
items:
description: min, typ, max number of pixels
hback-porch:
description: Horizontal back porch timing
+ $ref: /schemas/types.yaml#/definitions/uint32-array
oneOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- maxItems: 1
+ - maxItems: 1
items:
description: typical number of pixels
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- minItems: 3
+ - minItems: 3
maxItems: 3
items:
description: min, typ, max number of pixels
hsync-len:
description: Horizontal sync length panel timing
+ $ref: /schemas/types.yaml#/definitions/uint32-array
oneOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- maxItems: 1
+ - maxItems: 1
items:
description: typical number of pixels
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- minItems: 3
+ - minItems: 3
maxItems: 3
items:
description: min, typ, max number of pixels
vfront-porch:
description: Vertical front porch panel timing
+ $ref: /schemas/types.yaml#/definitions/uint32-array
oneOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- maxItems: 1
+ - maxItems: 1
items:
description: typical number of lines
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- minItems: 3
+ - minItems: 3
maxItems: 3
items:
description: min, typ, max number of lines
vback-porch:
description: Vertical back porch panel timing
+ $ref: /schemas/types.yaml#/definitions/uint32-array
oneOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- maxItems: 1
+ - maxItems: 1
items:
description: typical number of lines
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- minItems: 3
+ - minItems: 3
maxItems: 3
items:
description: min, typ, max number of lines
vsync-len:
description: Vertical sync length panel timing
+ $ref: /schemas/types.yaml#/definitions/uint32-array
oneOf:
- - $ref: /schemas/types.yaml#/definitions/uint32
- maxItems: 1
+ - maxItems: 1
items:
description: typical number of lines
- - $ref: /schemas/types.yaml#/definitions/uint32-array
- minItems: 3
+ - minItems: 3
maxItems: 3
items:
description: min, typ, max number of lines
diff --git a/Documentation/devicetree/bindings/display/samsung/samsung,exynos-hdmi-ddc.yaml b/Documentation/devicetree/bindings/display/samsung/samsung,exynos-hdmi-ddc.yaml
index f998a3a5b71f..919734c05c0b 100644
--- a/Documentation/devicetree/bindings/display/samsung/samsung,exynos-hdmi-ddc.yaml
+++ b/Documentation/devicetree/bindings/display/samsung/samsung,exynos-hdmi-ddc.yaml
@@ -11,7 +11,7 @@ maintainers:
- Joonyoung Shim <jy0922.shim@samsung.com>
- Seung-Woo Kim <sw0312.kim@samsung.com>
- Kyungmin Park <kyungmin.park@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/display/samsung/samsung,exynos-hdmi.yaml b/Documentation/devicetree/bindings/display/samsung/samsung,exynos-hdmi.yaml
index cb8e735ce3bd..63379fae3636 100644
--- a/Documentation/devicetree/bindings/display/samsung/samsung,exynos-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/samsung/samsung,exynos-hdmi.yaml
@@ -11,7 +11,7 @@ maintainers:
- Joonyoung Shim <jy0922.shim@samsung.com>
- Seung-Woo Kim <sw0312.kim@samsung.com>
- Kyungmin Park <kyungmin.park@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml b/Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml
index ba40284ac66f..00e325a19cb1 100644
--- a/Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml
+++ b/Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml
@@ -11,7 +11,7 @@ maintainers:
- Joonyoung Shim <jy0922.shim@samsung.com>
- Seung-Woo Kim <sw0312.kim@samsung.com>
- Kyungmin Park <kyungmin.park@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description:
Samsung Exynos SoC Mixer is responsible for mixing and blending multiple data
diff --git a/Documentation/devicetree/bindings/display/samsung/samsung,exynos5433-decon.yaml b/Documentation/devicetree/bindings/display/samsung/samsung,exynos5433-decon.yaml
index 6f796835ea03..7c37470bd329 100644
--- a/Documentation/devicetree/bindings/display/samsung/samsung,exynos5433-decon.yaml
+++ b/Documentation/devicetree/bindings/display/samsung/samsung,exynos5433-decon.yaml
@@ -11,7 +11,7 @@ maintainers:
- Joonyoung Shim <jy0922.shim@samsung.com>
- Seung-Woo Kim <sw0312.kim@samsung.com>
- Kyungmin Park <kyungmin.park@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
DECON (Display and Enhancement Controller) is the Display Controller for the
diff --git a/Documentation/devicetree/bindings/display/samsung/samsung,exynos5433-mic.yaml b/Documentation/devicetree/bindings/display/samsung/samsung,exynos5433-mic.yaml
index 01fccb138ebd..c5c6239c28d0 100644
--- a/Documentation/devicetree/bindings/display/samsung/samsung,exynos5433-mic.yaml
+++ b/Documentation/devicetree/bindings/display/samsung/samsung,exynos5433-mic.yaml
@@ -11,7 +11,7 @@ maintainers:
- Joonyoung Shim <jy0922.shim@samsung.com>
- Seung-Woo Kim <sw0312.kim@samsung.com>
- Kyungmin Park <kyungmin.park@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
MIC (Mobile Image Compressor) resides between DECON and MIPI DSI. MIPI DSI is
diff --git a/Documentation/devicetree/bindings/display/samsung/samsung,exynos7-decon.yaml b/Documentation/devicetree/bindings/display/samsung/samsung,exynos7-decon.yaml
index afa137d47922..320eedc61a5b 100644
--- a/Documentation/devicetree/bindings/display/samsung/samsung,exynos7-decon.yaml
+++ b/Documentation/devicetree/bindings/display/samsung/samsung,exynos7-decon.yaml
@@ -11,7 +11,7 @@ maintainers:
- Joonyoung Shim <jy0922.shim@samsung.com>
- Seung-Woo Kim <sw0312.kim@samsung.com>
- Kyungmin Park <kyungmin.park@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
DECON (Display and Enhancement Controller) is the Display Controller for the
diff --git a/Documentation/devicetree/bindings/display/samsung/samsung,fimd.yaml b/Documentation/devicetree/bindings/display/samsung/samsung,fimd.yaml
index 9cf5f120d516..c62ea9d22843 100644
--- a/Documentation/devicetree/bindings/display/samsung/samsung,fimd.yaml
+++ b/Documentation/devicetree/bindings/display/samsung/samsung,fimd.yaml
@@ -11,7 +11,7 @@ maintainers:
- Joonyoung Shim <jy0922.shim@samsung.com>
- Seung-Woo Kim <sw0312.kim@samsung.com>
- Kyungmin Park <kyungmin.park@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
index e614fe3187bb..d09d79d7406a 100644
--- a/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
+++ b/Documentation/devicetree/bindings/dma/qcom,gpi.yaml
@@ -29,6 +29,7 @@ properties:
interrupts:
description:
Interrupt lines for each GPI instance
+ minItems: 1
maxItems: 13
"#dma-cells":
diff --git a/Documentation/devicetree/bindings/extcon/maxim,max77843.yaml b/Documentation/devicetree/bindings/extcon/maxim,max77843.yaml
index f9ffe3d6f957..128960545640 100644
--- a/Documentation/devicetree/bindings/extcon/maxim,max77843.yaml
+++ b/Documentation/devicetree/bindings/extcon/maxim,max77843.yaml
@@ -8,7 +8,7 @@ title: Maxim MAX77843 MicroUSB and Companion Power Management IC Extcon
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for Maxim MAX77843 MicroUSB
@@ -25,7 +25,7 @@ properties:
$ref: /schemas/connector/usb-connector.yaml#
ports:
- $ref: /schemas/graph.yaml#/properties/port
+ $ref: /schemas/graph.yaml#/properties/ports
description:
Any connector to the data bus of this controller should be modelled using
the OF graph bindings specified
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
index 4d6bfae0653c..85f8d4764740 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
@@ -20,6 +20,7 @@ properties:
- mediatek,mt8183-mali
- realtek,rtd1619-mali
- renesas,r9a07g044-mali
+ - renesas,r9a07g054-mali
- rockchip,px30-mali
- rockchip,rk3568-mali
- const: arm,mali-bifrost # Mali Bifrost GPU model/revision is fully discoverable
@@ -109,7 +110,9 @@ allOf:
properties:
compatible:
contains:
- const: renesas,r9a07g044-mali
+ enum:
+ - renesas,r9a07g044-mali
+ - renesas,r9a07g054-mali
then:
properties:
interrupts:
diff --git a/Documentation/devicetree/bindings/hwmon/lltc,ltc4151.yaml b/Documentation/devicetree/bindings/hwmon/lltc,ltc4151.yaml
index 4b5851c326f7..b1a4c235376e 100644
--- a/Documentation/devicetree/bindings/hwmon/lltc,ltc4151.yaml
+++ b/Documentation/devicetree/bindings/hwmon/lltc,ltc4151.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: LTC4151 High Voltage I2C Current and Voltage Monitor
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/hwmon/microchip,mcp3021.yaml b/Documentation/devicetree/bindings/hwmon/microchip,mcp3021.yaml
index c42051f8a191..028d6e570131 100644
--- a/Documentation/devicetree/bindings/hwmon/microchip,mcp3021.yaml
+++ b/Documentation/devicetree/bindings/hwmon/microchip,mcp3021.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Microchip MCP3021 A/D converter
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/hwmon/sensirion,sht15.yaml b/Documentation/devicetree/bindings/hwmon/sensirion,sht15.yaml
index 4669217d01e1..80df7182ea28 100644
--- a/Documentation/devicetree/bindings/hwmon/sensirion,sht15.yaml
+++ b/Documentation/devicetree/bindings/hwmon/sensirion,sht15.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Sensirion SHT15 humidity and temperature sensor
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/hwmon/ti,tmp102.yaml b/Documentation/devicetree/bindings/hwmon/ti,tmp102.yaml
index d3eff4fac107..c5a889e3e27b 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,tmp102.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,tmp102.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: TMP102 temperature sensor
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/hwmon/ti,tmp108.yaml b/Documentation/devicetree/bindings/hwmon/ti,tmp108.yaml
index eda55bbc172d..dcbc6fbc3b48 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,tmp108.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,tmp108.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: TMP108 temperature sensor
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/hwmon/ti,tmp464.yaml b/Documentation/devicetree/bindings/hwmon/ti,tmp464.yaml
index 801ca9ba7d34..e7493e25a7d2 100644
--- a/Documentation/devicetree/bindings/hwmon/ti,tmp464.yaml
+++ b/Documentation/devicetree/bindings/hwmon/ti,tmp464.yaml
@@ -58,9 +58,8 @@ patternProperties:
The value (two's complement) to be programmed in the channel specific N correction register.
For remote channels only.
$ref: /schemas/types.yaml#/definitions/int32
- items:
- minimum: -128
- maximum: 127
+ minimum: -128
+ maximum: 127
required:
- reg
diff --git a/Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml b/Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml
index 19874e8b73b9..3e52a0db6c41 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung's High Speed I2C controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
The Samsung's High Speed I2C controller is used to interface with I2C devices
diff --git a/Documentation/devicetree/bindings/i2c/samsung,s3c2410-i2c.yaml b/Documentation/devicetree/bindings/i2c/samsung,s3c2410-i2c.yaml
index 84051b0129c2..c26230518957 100644
--- a/Documentation/devicetree/bindings/i2c/samsung,s3c2410-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/samsung,s3c2410-i2c.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S3C/S5P/Exynos SoC I2C Controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7476.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7476.yaml
index cf711082ad7d..666414a9c0de 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7476.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7476.yaml
@@ -98,6 +98,7 @@ allOf:
- ti,adc121s
- ti,ads7866
- ti,ads7868
+ then:
required:
- vcc-supply
# Devices with a vref
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
index 7c260f209687..92f9472a77ae 100644
--- a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
@@ -108,9 +108,7 @@ patternProperties:
- [1-5]: order 1 to 5.
For audio purpose it is recommended to use order 3 to 5.
$ref: /schemas/types.yaml#/definitions/uint32
- items:
- minimum: 0
- maximum: 5
+ maximum: 5
"#io-channel-cells":
const: 1
@@ -174,7 +172,7 @@ patternProperties:
contains:
const: st,stm32-dfsdm-adc
- - then:
+ then:
properties:
st,adc-channels:
minItems: 1
@@ -206,7 +204,7 @@ patternProperties:
contains:
const: st,stm32-dfsdm-dmic
- - then:
+ then:
properties:
st,adc-channels:
maxItems: 1
@@ -254,7 +252,7 @@ allOf:
contains:
const: st,stm32h7-dfsdm
- - then:
+ then:
patternProperties:
"^filter@[0-9]+$":
properties:
@@ -269,7 +267,7 @@ allOf:
contains:
const: st,stm32mp1-dfsdm
- - then:
+ then:
patternProperties:
"^filter@[0-9]+$":
properties:
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5360.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5360.yaml
index 0d8fb56f4b09..65f86f26947c 100644
--- a/Documentation/devicetree/bindings/iio/dac/adi,ad5360.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5360.yaml
@@ -59,9 +59,9 @@ allOf:
contains:
enum:
- adi,ad5371
- then:
- required:
- - vref2-supply
+ then:
+ required:
+ - vref2-supply
examples:
- |
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
index 89853b482513..8a676fef8c1d 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
@@ -93,48 +93,48 @@ allOf:
- qcom,sdm660-gnoc
- qcom,sdm660-snoc
- then:
- properties:
- clock-names:
- items:
- - const: bus
- - const: bus_a
-
- clocks:
- items:
- - description: Bus Clock
- - description: Bus A Clock
-
- # Child node's properties
- patternProperties:
- '^interconnect-[a-z0-9]+$':
- type: object
- description:
- snoc-mm is a child of snoc, sharing snoc's register address space.
-
- properties:
- compatible:
- enum:
- - qcom,msm8939-snoc-mm
-
- '#interconnect-cells':
- const: 1
-
- clock-names:
- items:
- - const: bus
- - const: bus_a
-
- clocks:
- items:
- - description: Bus Clock
- - description: Bus A Clock
-
- required:
- - compatible
- - '#interconnect-cells'
- - clock-names
- - clocks
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: bus
+ - const: bus_a
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: Bus A Clock
+
+ # Child node's properties
+ patternProperties:
+ '^interconnect-[a-z0-9]+$':
+ type: object
+ description:
+ snoc-mm is a child of snoc, sharing snoc's register address space.
+
+ properties:
+ compatible:
+ enum:
+ - qcom,msm8939-snoc-mm
+
+ '#interconnect-cells':
+ const: 1
+
+ clock-names:
+ items:
+ - const: bus
+ - const: bus_a
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: Bus A Clock
+
+ required:
+ - compatible
+ - '#interconnect-cells'
+ - clock-names
+ - clocks
- if:
properties:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mrvl,intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/mrvl,intc.yaml
index 372ccbfae771..5a583bf3dbc1 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mrvl,intc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/mrvl,intc.yaml
@@ -7,10 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Marvell MMP/Orion Interrupt controller bindings
maintainers:
- - Thomas Gleixner <tglx@linutronix.de>
- - Jason Cooper <jason@lakedaemon.net>
- - Marc Zyngier <maz@kernel.org>
- - Rob Herring <robh+dt@kernel.org>
+ - Andrew Lunn <andrew@lunn.ch>
+ - Gregory Clement <gregory.clement@bootlin.com>
allOf:
- if:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.yaml b/Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.yaml
index d631b7589d50..72456a07dac9 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung Exynos SoC Interrupt Combiner Controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
Samsung's Exynos4 architecture includes a interrupt combiner controller which
diff --git a/Documentation/devicetree/bindings/leds/maxim,max77693.yaml b/Documentation/devicetree/bindings/leds/maxim,max77693.yaml
index 86a0005cf156..e27f57bb52ae 100644
--- a/Documentation/devicetree/bindings/leds/maxim,max77693.yaml
+++ b/Documentation/devicetree/bindings/leds/maxim,max77693.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Maxim MAX77693 MicroUSB and Companion Power Management IC LEDs
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for Maxim MAX77693 MicroUSB Integrated
diff --git a/Documentation/devicetree/bindings/media/coda.yaml b/Documentation/devicetree/bindings/media/coda.yaml
index 36781ee4617f..c9d5adbc8c4a 100644
--- a/Documentation/devicetree/bindings/media/coda.yaml
+++ b/Documentation/devicetree/bindings/media/coda.yaml
@@ -65,7 +65,6 @@ properties:
iram:
$ref: /schemas/types.yaml#/definitions/phandle
description: phandle pointing to the SRAM device node
- maxItems: 1
required:
- compatible
diff --git a/Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml b/Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml
index 9b179bb44dfb..aa55ca65d6ed 100644
--- a/Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml
@@ -63,13 +63,11 @@ properties:
mediatek,vpu:
$ref: /schemas/types.yaml#/definitions/phandle
- maxItems: 1
description:
Describes point to vpu.
mediatek,scp:
$ref: /schemas/types.yaml#/definitions/phandle
- maxItems: 1
description:
Describes point to scp.
diff --git a/Documentation/devicetree/bindings/media/mediatek,vcodec-encoder.yaml b/Documentation/devicetree/bindings/media/mediatek,vcodec-encoder.yaml
index e7b65a91c92c..deb5b657a2d5 100644
--- a/Documentation/devicetree/bindings/media/mediatek,vcodec-encoder.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,vcodec-encoder.yaml
@@ -55,13 +55,11 @@ properties:
mediatek,vpu:
$ref: /schemas/types.yaml#/definitions/phandle
- maxItems: 1
description:
Describes point to vpu.
mediatek,scp:
$ref: /schemas/types.yaml#/definitions/phandle
- maxItems: 1
description:
Describes point to scp.
@@ -106,7 +104,6 @@ allOf:
enum:
- mediatek,mt8173-vcodec-enc
- mediatek,mt8192-vcodec-enc
- - mediatek,mt8173-vcodec-enc
then:
properties:
diff --git a/Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml b/Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml
index 7687be0f50aa..c73bf2352aca 100644
--- a/Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml
@@ -61,7 +61,6 @@ properties:
mediatek,scp:
$ref: /schemas/types.yaml#/definitions/phandle
- maxItems: 1
description: |
The node of system control processor (SCP), using
the remoteproc & rpmsg framework.
diff --git a/Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.yaml b/Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.yaml
index 769f13250047..08cbdcddfead 100644
--- a/Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: DDR PHY Front End (DPFE) for Broadcom STB
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Markus Mayer <mmayer@broadcom.com>
properties:
diff --git a/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2-timings.yaml b/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2-timings.yaml
index f3e62ee07126..1daa66592477 100644
--- a/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2-timings.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2-timings.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: LPDDR2 SDRAM AC timing parameters for a given speed-bin
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2.yaml b/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2.yaml
index dd2141cad866..9d78f140609b 100644
--- a/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: LPDDR2 SDRAM compliant to JEDEC JESD209-2
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr3-timings.yaml b/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr3-timings.yaml
index 97c3e988af5f..5c6512c1e1e3 100644
--- a/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr3-timings.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr3-timings.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: LPDDR3 SDRAM AC timing parameters for a given speed-bin
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr3.yaml b/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr3.yaml
index c542f32c39fa..48908a19473c 100644
--- a/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr3.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr3.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: LPDDR3 SDRAM compliant to JEDEC JESD209-3
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/memory-controllers/marvell,mvebu-sdram-controller.yaml b/Documentation/devicetree/bindings/memory-controllers/marvell,mvebu-sdram-controller.yaml
index 14a6bc8f421f..9249624c4fa0 100644
--- a/Documentation/devicetree/bindings/memory-controllers/marvell,mvebu-sdram-controller.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/marvell,mvebu-sdram-controller.yaml
@@ -8,7 +8,7 @@ title: Marvell MVEBU SDRAM controller
maintainers:
- Jan Luebbe <jlu@pengutronix.de>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/memory-controllers/qca,ath79-ddr-controller.yaml b/Documentation/devicetree/bindings/memory-controllers/qca,ath79-ddr-controller.yaml
index 9566b3421f03..0c511ab906bf 100644
--- a/Documentation/devicetree/bindings/memory-controllers/qca,ath79-ddr-controller.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/qca,ath79-ddr-controller.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Atheros AR7xxx/AR9xxx DDR controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
The DDR controller of the AR7xxx and AR9xxx families provides an interface to
diff --git a/Documentation/devicetree/bindings/memory-controllers/renesas,h8300-bsc.yaml b/Documentation/devicetree/bindings/memory-controllers/renesas,h8300-bsc.yaml
index 2b18cef99511..514b2c5f8858 100644
--- a/Documentation/devicetree/bindings/memory-controllers/renesas,h8300-bsc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/renesas,h8300-bsc.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: H8/300 bus controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Yoshinori Sato <ysato@users.sourceforge.jp>
properties:
diff --git a/Documentation/devicetree/bindings/memory-controllers/samsung,exynos5422-dmc.yaml b/Documentation/devicetree/bindings/memory-controllers/samsung,exynos5422-dmc.yaml
index f152243f6b18..098348b2b815 100644
--- a/Documentation/devicetree/bindings/memory-controllers/samsung,exynos5422-dmc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/samsung,exynos5422-dmc.yaml
@@ -9,7 +9,7 @@ title: |
Controller device
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Lukasz Luba <lukasz.luba@arm.com>
description: |
diff --git a/Documentation/devicetree/bindings/memory-controllers/synopsys,ddrc-ecc.yaml b/Documentation/devicetree/bindings/memory-controllers/synopsys,ddrc-ecc.yaml
index fb7ae38a9c86..f46e95704f53 100644
--- a/Documentation/devicetree/bindings/memory-controllers/synopsys,ddrc-ecc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/synopsys,ddrc-ecc.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Synopsys IntelliDDR Multi Protocol memory controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Manish Narani <manish.narani@xilinx.com>
- Michal Simek <michal.simek@xilinx.com>
@@ -24,9 +24,9 @@ description: |
properties:
compatible:
enum:
+ - snps,ddrc-3.80a
- xlnx,zynq-ddrc-a05
- xlnx,zynqmp-ddrc-2.40a
- - snps,ddrc-3.80a
interrupts:
maxItems: 1
@@ -43,7 +43,9 @@ allOf:
properties:
compatible:
contains:
- const: xlnx,zynqmp-ddrc-2.40a
+ enum:
+ - snps,ddrc-3.80a
+ - xlnx,zynqmp-ddrc-2.40a
then:
required:
- interrupts
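With this change, a plain snps,ddrc-3.80a node must now carry an interrupt as well. A minimal sketch of a conforming node (the unit address, reg window and interrupt specifier are hypothetical, not taken from a real board):

    memory-controller@fd070000 {
        compatible = "snps,ddrc-3.80a";
        reg = <0xfd070000 0x30000>;
        interrupts = <0 112 4>;
    };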
diff --git a/Documentation/devicetree/bindings/memory-controllers/ti,da8xx-ddrctl.yaml b/Documentation/devicetree/bindings/memory-controllers/ti,da8xx-ddrctl.yaml
index 9ed51185ff99..382ddab60fbd 100644
--- a/Documentation/devicetree/bindings/memory-controllers/ti,da8xx-ddrctl.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/ti,da8xx-ddrctl.yaml
@@ -8,7 +8,7 @@ title: Texas Instruments da8xx DDR2/mDDR memory controller
maintainers:
- Bartosz Golaszewski <bgolaszewski@baylibre.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
Documentation:
diff --git a/Documentation/devicetree/bindings/mfd/atmel-flexcom.txt b/Documentation/devicetree/bindings/mfd/atmel-flexcom.txt
index 692300117c64..9d837535637b 100644
--- a/Documentation/devicetree/bindings/mfd/atmel-flexcom.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-flexcom.txt
@@ -54,7 +54,7 @@ flexcom@f8034000 {
clock-names = "spi_clk";
atmel,fifo-size = <32>;
- mtd_dataflash@0 {
+ flash@0 {
compatible = "atmel,at25f512b";
reg = <0>;
spi-max-frequency = <20000000>;
diff --git a/Documentation/devicetree/bindings/mfd/maxim,max14577.yaml b/Documentation/devicetree/bindings/mfd/maxim,max14577.yaml
index 27870b8760a6..52edd1bf549f 100644
--- a/Documentation/devicetree/bindings/mfd/maxim,max14577.yaml
+++ b/Documentation/devicetree/bindings/mfd/maxim,max14577.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Maxim MAX14577/MAX77836 MicroUSB and Companion Power Management IC
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for Maxim MAX14577/MAX77836 MicroUSB
diff --git a/Documentation/devicetree/bindings/mfd/maxim,max77686.yaml b/Documentation/devicetree/bindings/mfd/maxim,max77686.yaml
index 859655a789c3..d027aabe453b 100644
--- a/Documentation/devicetree/bindings/mfd/maxim,max77686.yaml
+++ b/Documentation/devicetree/bindings/mfd/maxim,max77686.yaml
@@ -8,7 +8,7 @@ title: Maxim MAX77686 Power Management IC
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for Maxim MAX77686 Power Management
diff --git a/Documentation/devicetree/bindings/mfd/maxim,max77693.yaml b/Documentation/devicetree/bindings/mfd/maxim,max77693.yaml
index 906101197e11..1b06a77ec798 100644
--- a/Documentation/devicetree/bindings/mfd/maxim,max77693.yaml
+++ b/Documentation/devicetree/bindings/mfd/maxim,max77693.yaml
@@ -8,7 +8,7 @@ title: Maxim MAX77693 MicroUSB and Companion Power Management IC
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for Maxim MAX77693 MicroUSB
diff --git a/Documentation/devicetree/bindings/mfd/maxim,max77802.yaml b/Documentation/devicetree/bindings/mfd/maxim,max77802.yaml
index baa1346ac5d5..ad2013900b03 100644
--- a/Documentation/devicetree/bindings/mfd/maxim,max77802.yaml
+++ b/Documentation/devicetree/bindings/mfd/maxim,max77802.yaml
@@ -8,7 +8,7 @@ title: Maxim MAX77802 Power Management IC
maintainers:
- Javier Martinez Canillas <javier@dowhile0.org>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for Maxim MAX77802 Power Management
diff --git a/Documentation/devicetree/bindings/mfd/maxim,max77843.yaml b/Documentation/devicetree/bindings/mfd/maxim,max77843.yaml
index 61a0f9dcb983..f30f96bbff43 100644
--- a/Documentation/devicetree/bindings/mfd/maxim,max77843.yaml
+++ b/Documentation/devicetree/bindings/mfd/maxim,max77843.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Maxim MAX77843 MicroUSB and Companion Power Management IC
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for Maxim MAX77843 MicroUSB
diff --git a/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.yaml b/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.yaml
index bae55c98961c..f7bb67d10eff 100644
--- a/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.yaml
+++ b/Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung Exynos SoC Low Power Audio Subsystem (LPASS)
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
properties:
diff --git a/Documentation/devicetree/bindings/mfd/samsung,s2mpa01.yaml b/Documentation/devicetree/bindings/mfd/samsung,s2mpa01.yaml
index 017befdf8adb..055dfc337c2f 100644
--- a/Documentation/devicetree/bindings/mfd/samsung,s2mpa01.yaml
+++ b/Documentation/devicetree/bindings/mfd/samsung,s2mpa01.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S2MPA01 Power Management IC
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for S2M and S5M family of Power
diff --git a/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml b/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
index 771b3f16da96..5ff6546c72b7 100644
--- a/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
+++ b/Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S2MPS11/13/14/15 and S2MPU02 Power Management IC
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for S2M and S5M family of Power
diff --git a/Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml b/Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml
index 5531718abdf0..10c7b408f33a 100644
--- a/Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml
+++ b/Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S5M8767 Power Management IC
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for S2M and S5M family of Power
diff --git a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.yaml b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.yaml
index ce64b3498378..f3f4d5b02744 100644
--- a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.yaml
+++ b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.yaml
@@ -197,6 +197,8 @@ allOf:
- nvidia,tegra30-sdhci
- nvidia,tegra114-sdhci
- nvidia,tegra124-sdhci
+ then:
+ properties:
clocks:
items:
- description: module clock
diff --git a/Documentation/devicetree/bindings/net/dsa/realtek.yaml b/Documentation/devicetree/bindings/net/dsa/realtek.yaml
index 8756060895a8..99ee4b5b9346 100644
--- a/Documentation/devicetree/bindings/net/dsa/realtek.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/realtek.yaml
@@ -27,32 +27,25 @@ description:
The realtek-mdio driver is an MDIO driver and it must be inserted inside
an MDIO node.
+ The compatible string is only used to identify which (silicon) family the
+ switch belongs to. Roughly speaking, a family is any set of Realtek switches
+ whose chip identification register(s) have a common location and semantics.
+ The different models in a given family can be automatically disambiguated by
+ parsing the chip identification register(s) according to the given family,
+ avoiding the need for a unique compatible string for each model.
+
properties:
compatible:
enum:
- realtek,rtl8365mb
- - realtek,rtl8366
- realtek,rtl8366rb
- - realtek,rtl8366s
- - realtek,rtl8367
- - realtek,rtl8367b
- - realtek,rtl8367rb
- - realtek,rtl8367s
- - realtek,rtl8368s
- - realtek,rtl8369
- - realtek,rtl8370
description: |
- realtek,rtl8365mb: 4+1 ports
- realtek,rtl8366: 5+1 ports
- realtek,rtl8366rb: 5+1 ports
- realtek,rtl8366s: 5+1 ports
- realtek,rtl8367:
- realtek,rtl8367b:
- realtek,rtl8367rb: 5+2 ports
- realtek,rtl8367s: 5+2 ports
- realtek,rtl8368s: 8 ports
- realtek,rtl8369: 8+1 ports
- realtek,rtl8370: 8+2 ports
+ realtek,rtl8365mb:
+ Use with models RTL8363NB, RTL8363NB-VB, RTL8363SC, RTL8363SC-VB,
+ RTL8364NB, RTL8364NB-VB, RTL8365MB, RTL8366SC, RTL8367RB-VB, RTL8367S,
+ RTL8367SB, RTL8370MB, RTL8310SR
+ realtek,rtl8366rb:
+ Use with models RTL8366RB, RTL8366S
mdc-gpios:
description: GPIO line for the MDC clock line.
@@ -335,7 +328,7 @@ examples:
#size-cells = <0>;
switch@29 {
- compatible = "realtek,rtl8367s";
+ compatible = "realtek,rtl8365mb";
reg = <29>;
reset-gpios = <&gpio2 20 GPIO_ACTIVE_LOW>;
diff --git a/Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml b/Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml
index 15a45db3899a..1bcaf6ba822c 100644
--- a/Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Marvell International Ltd. NCI NFC controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml b/Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml
index 7465aea2e1c0..e381a3c14836 100644
--- a/Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml
@@ -8,7 +8,7 @@ title: NXP Semiconductors NCI NFC controller
maintainers:
- Charles Gorand <charles.gorand@effinnov.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/nfc/nxp,pn532.yaml b/Documentation/devicetree/bindings/net/nfc/nxp,pn532.yaml
index d8ba5a18db98..0509e0166345 100644
--- a/Documentation/devicetree/bindings/net/nfc/nxp,pn532.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/nxp,pn532.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: NXP Semiconductors PN532 NFC controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/nfc/nxp,pn544.yaml b/Documentation/devicetree/bindings/net/nfc/nxp,pn544.yaml
index d520414de463..18b3a7d819df 100644
--- a/Documentation/devicetree/bindings/net/nfc/nxp,pn544.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/nxp,pn544.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: NXP Semiconductors PN544 NFC Controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/nfc/st,st-nci.yaml b/Documentation/devicetree/bindings/net/nfc/st,st-nci.yaml
index a6a1bc788d29..ef1155038a2f 100644
--- a/Documentation/devicetree/bindings/net/nfc/st,st-nci.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/st,st-nci.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: STMicroelectronics ST NCI NFC controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/nfc/st,st21nfca.yaml b/Documentation/devicetree/bindings/net/nfc/st,st21nfca.yaml
index 4356eacde8aa..8a7274357b46 100644
--- a/Documentation/devicetree/bindings/net/nfc/st,st21nfca.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/st,st21nfca.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: STMicroelectronics SAS ST21NFCA NFC controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/nfc/st,st95hf.yaml b/Documentation/devicetree/bindings/net/nfc/st,st95hf.yaml
index d3bca376039e..963d9531a856 100644
--- a/Documentation/devicetree/bindings/net/nfc/st,st95hf.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/st,st95hf.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: STMicroelectronics ST95HF NFC controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml b/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
index 40da2ac98978..404c8df99364 100644
--- a/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Texas Instruments TRF7970A RFID/NFC/15693 Transceiver
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Mark Greer <mgreer@animalcreek.com>
properties:
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index 2d5248f5b919..36c85eb3dc0d 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -53,20 +53,18 @@ properties:
- allwinner,sun8i-r40-gmac
- allwinner,sun8i-v3s-emac
- allwinner,sun50i-a64-emac
- - loongson,ls2k-dwmac
- - loongson,ls7a-dwmac
- amlogic,meson6-dwmac
- amlogic,meson8b-dwmac
- amlogic,meson8m2-dwmac
- amlogic,meson-gxbb-dwmac
- amlogic,meson-axg-dwmac
- - loongson,ls2k-dwmac
- - loongson,ls7a-dwmac
- ingenic,jz4775-mac
- ingenic,x1000-mac
- ingenic,x1600-mac
- ingenic,x1830-mac
- ingenic,x2000-mac
+ - loongson,ls2k-dwmac
+ - loongson,ls7a-dwmac
- rockchip,px30-gmac
- rockchip,rk3128-gmac
- rockchip,rk3228-gmac
diff --git a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
index e602761f7b14..b0ebcef6801c 100644
--- a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
+++ b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
@@ -13,9 +13,6 @@ description: |
This describes the devicetree bindings for AVE ethernet controller
implemented on Socionext UniPhier SoCs.
-allOf:
- - $ref: ethernet-controller.yaml#
-
properties:
compatible:
enum:
@@ -44,25 +41,13 @@ properties:
minItems: 1
maxItems: 4
- clock-names:
- oneOf:
- - items: # for Pro4
- - const: gio
- - const: ether
- - const: ether-gb
- - const: ether-phy
- - const: ether # for others
+ clock-names: true
resets:
minItems: 1
maxItems: 2
- reset-names:
- oneOf:
- - items: # for Pro4
- - const: gio
- - const: ether
- - const: ether # for others
+ reset-names: true
socionext,syscon-phy-mode:
$ref: /schemas/types.yaml#/definitions/phandle-array
@@ -78,6 +63,42 @@ properties:
$ref: mdio.yaml#
unevaluatedProperties: false
+allOf:
+ - $ref: ethernet-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: socionext,uniphier-pro4-ave4
+ then:
+ properties:
+ clocks:
+ minItems: 4
+ maxItems: 4
+ clock-names:
+ items:
+ - const: gio
+ - const: ether
+ - const: ether-gb
+ - const: ether-phy
+ resets:
+ minItems: 2
+ maxItems: 2
+ reset-names:
+ items:
+ - const: gio
+ - const: ether
+ else:
+ properties:
+ clocks:
+ maxItems: 1
+ clock-names:
+ const: ether
+ resets:
+ maxItems: 1
+ reset-names:
+ const: ether
+
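As a sketch, a node matching the new Pro4 branch would look along these lines (the address, phandles and clock/reset indices are placeholders, not taken from a real DT; other required properties are omitted):

    ethernet@65000000 {
        compatible = "socionext,uniphier-pro4-ave4";
        reg = <0x65000000 0x8500>;
        clocks = <&sys_clk 6>, <&sys_clk 7>, <&sys_clk 8>, <&sys_clk 9>;
        clock-names = "gio", "ether", "ether-gb", "ether-phy";
        resets = <&sys_rst 6>, <&sys_rst 7>;
        reset-names = "gio", "ether";
    };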
required:
- compatible
- reg
@@ -90,7 +111,7 @@ required:
- reset-names
- mdio
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml b/Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml
index dbfca5ee9139..6f44f9516c36 100644
--- a/Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml
+++ b/Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml
@@ -56,6 +56,7 @@ if:
compatible:
contains:
const: ti,davinci_mdio
+then:
required:
- bus_freq
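With the then: made explicit, only nodes using the legacy ti,davinci_mdio compatible must carry bus_freq. A minimal sketch (the address and frequency are illustrative):

    mdio@4a101000 {
        compatible = "ti,davinci_mdio";
        reg = <0x4a101000 0x1000>;
        #address-cells = <1>;
        #size-cells = <0>;
        bus_freq = <1000000>;
    };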
diff --git a/Documentation/devicetree/bindings/phy/nvidia,tegra20-usb-phy.yaml b/Documentation/devicetree/bindings/phy/nvidia,tegra20-usb-phy.yaml
index dfde0eaf66e1..d61585c96e31 100644
--- a/Documentation/devicetree/bindings/phy/nvidia,tegra20-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/nvidia,tegra20-usb-phy.yaml
@@ -275,17 +275,17 @@ allOf:
- nvidia,hssquelch-level
- nvidia,hsdiscon-level
- else:
- properties:
- clocks:
- maxItems: 4
+ else:
+ properties:
+ clocks:
+ maxItems: 4
- clock-names:
- items:
- - const: reg
- - const: pll_u
- - const: timer
- - const: utmi-pads
+ clock-names:
+ items:
+ - const: reg
+ - const: pll_u
+ - const: timer
+ - const: utmi-pads
- if:
properties:
diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml
index e23e5590eaa3..0655e485b260 100644
--- a/Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml
@@ -14,24 +14,24 @@ if:
compatible:
contains:
const: qcom,usb-hs-phy-apq8064
- then:
- properties:
- resets:
- maxItems: 1
+then:
+ properties:
+ resets:
+ maxItems: 1
- reset-names:
- const: por
+ reset-names:
+ const: por
- else:
- properties:
- resets:
- minItems: 2
- maxItems: 2
+else:
+ properties:
+ resets:
+ minItems: 2
+ maxItems: 2
- reset-names:
- items:
- - const: phy
- - const: por
+ reset-names:
+ items:
+ - const: phy
+ - const: por
properties:
compatible:
@@ -92,6 +92,8 @@ additionalProperties: false
examples:
- |
otg: usb-controller {
+ #reset-cells = <1>;
+
ulpi {
phy {
compatible = "qcom,usb-hs-phy-msm8974", "qcom,usb-hs-phy";
diff --git a/Documentation/devicetree/bindings/phy/samsung,dp-video-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,dp-video-phy.yaml
index 838c6d480ce6..b03b2f00cc5b 100644
--- a/Documentation/devicetree/bindings/phy/samsung,dp-video-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,dp-video-phy.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung Exynos SoC DisplayPort PHY
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Marek Szyprowski <m.szyprowski@samsung.com>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
diff --git a/Documentation/devicetree/bindings/phy/samsung,exynos-hdmi-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,exynos-hdmi-phy.yaml
index c61574e10b2a..3e5f035de2e9 100644
--- a/Documentation/devicetree/bindings/phy/samsung,exynos-hdmi-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,exynos-hdmi-phy.yaml
@@ -11,7 +11,7 @@ maintainers:
- Joonyoung Shim <jy0922.shim@samsung.com>
- Seung-Woo Kim <sw0312.kim@samsung.com>
- Kyungmin Park <kyungmin.park@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/phy/samsung,exynos5250-sata-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,exynos5250-sata-phy.yaml
index 62b39bb46585..8751e559484f 100644
--- a/Documentation/devicetree/bindings/phy/samsung,exynos5250-sata-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,exynos5250-sata-phy.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung Exynos5250 SoC SATA PHY
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Marek Szyprowski <m.szyprowski@samsung.com>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
diff --git a/Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml
index 54aa056b224d..415440aaad89 100644
--- a/Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S5P/Exynos SoC MIPI CSIS/DSIM DPHY
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Marek Szyprowski <m.szyprowski@samsung.com>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
diff --git a/Documentation/devicetree/bindings/phy/samsung,usb2-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,usb2-phy.yaml
index 056e270a4e88..d9f22a801cbf 100644
--- a/Documentation/devicetree/bindings/phy/samsung,usb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,usb2-phy.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S5P/Exynos SoC USB 2.0 PHY
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Marek Szyprowski <m.szyprowski@samsung.com>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
diff --git a/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
index f83f0f8135b9..5ba55f9f20cc 100644
--- a/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung Exynos SoC USB 3.0 DRD PHY USB 2.0 PHY
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Marek Szyprowski <m.szyprowski@samsung.com>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
diff --git a/Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml b/Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml
index 8a90d8273767..6bd42e43cdab 100644
--- a/Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml
@@ -48,13 +48,12 @@ properties:
Name of one pin group to configure.
enum: [ aif1, aif2, aif3, aif4, mif1, mif2, mif3, pdmspk1,
pdmspk2, dmic4, dmic5, dmic6, gpio1, gpio2, gpio3,
- gpio4, gpio5, gpio6, gpio7, gpio7, gpio8, gpio9,
+ gpio4, gpio5, gpio6, gpio7, gpio8, gpio9,
gpio10, gpio11, gpio12, gpio13, gpio14, gpio15,
- gpio16, gpio17, gpio17, gpio18, gpio19, gpio20,
- gpio21, gpio22, gpio23, gpio24, gpio25, gpio26,
- gpio27, gpio27, gpio28, gpio29, gpio30, gpio31,
- gpio32, gpio33, gpio34, gpio35, gpio36, gpio37,
- gpio37, gpio38, gpio39 ]
+ gpio16, gpio17, gpio18, gpio19, gpio20, gpio21,
+ gpio22, gpio23, gpio24, gpio25, gpio26, gpio27,
+ gpio28, gpio29, gpio30, gpio31, gpio32, gpio33,
+ gpio34, gpio35, gpio36, gpio37, gpio38, gpio39 ]
function:
description:
diff --git a/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-gpio-bank.yaml b/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-gpio-bank.yaml
index f73348c54748..8cf3c47ab86b 100644
--- a/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-gpio-bank.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-gpio-bank.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S3C/S5P/Exynos SoC pin controller - gpio bank
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-pins-cfg.yaml b/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-pins-cfg.yaml
index c71939ac8b63..9869d4dceddb 100644
--- a/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-pins-cfg.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-pins-cfg.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S3C/S5P/Exynos SoC pin controller - pins configuration
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-wakeup-interrupt.yaml b/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-wakeup-interrupt.yaml
index a822f70f5702..1de91a51234d 100644
--- a/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-wakeup-interrupt.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-wakeup-interrupt.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S3C/S5P/Exynos SoC pin controller - wake-up interrupt controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl.yaml
index 989e48c051cf..3a65c66ca71d 100644
--- a/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/samsung,pinctrl.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S3C/S5P/Exynos SoC pin controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
- Tomasz Figa <tomasz.figa@gmail.com>
diff --git a/Documentation/devicetree/bindings/power/renesas,apmu.yaml b/Documentation/devicetree/bindings/power/renesas,apmu.yaml
index 4d293b2b2f84..d77fc88050c8 100644
--- a/Documentation/devicetree/bindings/power/renesas,apmu.yaml
+++ b/Documentation/devicetree/bindings/power/renesas,apmu.yaml
@@ -36,7 +36,8 @@ properties:
cpus:
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
- maxItems: 1
+ minItems: 1
+ maxItems: 4
description: |
Array of phandles pointing to CPU cores, which should match the order of
CPU cores used by the WUPCR and PSTR registers in the Advanced Power
diff --git a/Documentation/devicetree/bindings/power/supply/bq2415x.yaml b/Documentation/devicetree/bindings/power/supply/bq2415x.yaml
index f8461f06e6f4..118cf484cc69 100644
--- a/Documentation/devicetree/bindings/power/supply/bq2415x.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq2415x.yaml
@@ -17,7 +17,6 @@ properties:
compatible:
enum:
- ti,bq24150
- - ti,bq24150
- ti,bq24150a
- ti,bq24151
- ti,bq24151a
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max14577.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max14577.yaml
index 3978b48299de..4d3a1d09036f 100644
--- a/Documentation/devicetree/bindings/power/supply/maxim,max14577.yaml
+++ b/Documentation/devicetree/bindings/power/supply/maxim,max14577.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Maxim MAX14577/MAX77836 MicroUSB and Companion Power Management IC Charger
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for Maxim MAX14577/MAX77836 MicroUSB
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max77693.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max77693.yaml
index a21dc1a8890f..f5fd53debbc8 100644
--- a/Documentation/devicetree/bindings/power/supply/maxim,max77693.yaml
+++ b/Documentation/devicetree/bindings/power/supply/maxim,max77693.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Maxim MAX77693 MicroUSB and Companion Power Management IC Charger
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for Maxim MAX77693 MicroUSB Integrated
diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
index 9b131c6facbc..84eeaef179a5 100644
--- a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
@@ -18,23 +18,23 @@ description:
allOf:
- $ref: "regulator.yaml#"
-
-if:
- properties:
- compatible:
- contains:
- const: regulator-fixed-clock
- required:
- - clocks
-else:
- if:
- properties:
- compatible:
- contains:
- const: regulator-fixed-domain
- required:
- - power-domains
- - required-opps
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: regulator-fixed-clock
+ then:
+ required:
+ - clocks
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: regulator-fixed-domain
+ then:
+ required:
+ - power-domains
+ - required-opps
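For instance, the first branch now fires only for nodes such as the following (the node name, regulator name and clock phandle are illustrative only):

    regulator-sdcard {
        compatible = "regulator-fixed-clock";
        regulator-name = "sdcard-supply";
        clocks = <&clk_gate 4>;
    };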
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/regulator/maxim,max14577.yaml b/Documentation/devicetree/bindings/regulator/maxim,max14577.yaml
index 16f01886a601..285dc7122977 100644
--- a/Documentation/devicetree/bindings/regulator/maxim,max14577.yaml
+++ b/Documentation/devicetree/bindings/regulator/maxim,max14577.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Maxim MAX14577/MAX77836 MicroUSB and Companion Power Management IC regulators
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for Maxim MAX14577/MAX77836 MicroUSB
diff --git a/Documentation/devicetree/bindings/regulator/maxim,max77686.yaml b/Documentation/devicetree/bindings/regulator/maxim,max77686.yaml
index bb64b679f765..0e7cd4b3ace0 100644
--- a/Documentation/devicetree/bindings/regulator/maxim,max77686.yaml
+++ b/Documentation/devicetree/bindings/regulator/maxim,max77686.yaml
@@ -8,7 +8,7 @@ title: Maxim MAX77686 Power Management IC regulators
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for Maxim MAX77686 Power Management
diff --git a/Documentation/devicetree/bindings/regulator/maxim,max77693.yaml b/Documentation/devicetree/bindings/regulator/maxim,max77693.yaml
index 20d8559bdc2b..945a539749e8 100644
--- a/Documentation/devicetree/bindings/regulator/maxim,max77693.yaml
+++ b/Documentation/devicetree/bindings/regulator/maxim,max77693.yaml
@@ -8,7 +8,7 @@ title: Maxim MAX77693 MicroUSB and Companion Power Management IC regulators
maintainers:
- Chanwoo Choi <cw00.choi@samsung.com>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for Maxim MAX77693 MicroUSB Integrated
diff --git a/Documentation/devicetree/bindings/regulator/maxim,max77802.yaml b/Documentation/devicetree/bindings/regulator/maxim,max77802.yaml
index f2b4dd15a0f3..236348c4710c 100644
--- a/Documentation/devicetree/bindings/regulator/maxim,max77802.yaml
+++ b/Documentation/devicetree/bindings/regulator/maxim,max77802.yaml
@@ -8,7 +8,7 @@ title: Maxim MAX77802 Power Management IC regulators
maintainers:
- Javier Martinez Canillas <javier@dowhile0.org>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for Maxim MAX77802 Power Management
diff --git a/Documentation/devicetree/bindings/regulator/maxim,max77843.yaml b/Documentation/devicetree/bindings/regulator/maxim,max77843.yaml
index a963025e96c1..9695e7242882 100644
--- a/Documentation/devicetree/bindings/regulator/maxim,max77843.yaml
+++ b/Documentation/devicetree/bindings/regulator/maxim,max77843.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Maxim MAX77843 MicroUSB and Companion Power Management IC regulators
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for Maxim MAX77843 MicroUSB Integrated
diff --git a/Documentation/devicetree/bindings/regulator/maxim,max8952.yaml b/Documentation/devicetree/bindings/regulator/maxim,max8952.yaml
index e4e8c58f6046..3ff0d7d980e9 100644
--- a/Documentation/devicetree/bindings/regulator/maxim,max8952.yaml
+++ b/Documentation/devicetree/bindings/regulator/maxim,max8952.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Maxim MAX8952 voltage regulator
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
allOf:
- $ref: regulator.yaml#
diff --git a/Documentation/devicetree/bindings/regulator/maxim,max8973.yaml b/Documentation/devicetree/bindings/regulator/maxim,max8973.yaml
index 5898dcf10f06..b92eef68c19f 100644
--- a/Documentation/devicetree/bindings/regulator/maxim,max8973.yaml
+++ b/Documentation/devicetree/bindings/regulator/maxim,max8973.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Maxim MAX8973/MAX77621 voltage regulator
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
allOf:
- $ref: regulator.yaml#
diff --git a/Documentation/devicetree/bindings/regulator/maxim,max8997.yaml b/Documentation/devicetree/bindings/regulator/maxim,max8997.yaml
index d5a44ca3df04..4321f061a7f6 100644
--- a/Documentation/devicetree/bindings/regulator/maxim,max8997.yaml
+++ b/Documentation/devicetree/bindings/regulator/maxim,max8997.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Maxim MAX8997 Power Management IC
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
The Maxim MAX8997 is a Power Management IC which includes voltage and current
diff --git a/Documentation/devicetree/bindings/regulator/richtek,rt5190a-regulator.yaml b/Documentation/devicetree/bindings/regulator/richtek,rt5190a-regulator.yaml
index 28725c5467fc..edb411be0390 100644
--- a/Documentation/devicetree/bindings/regulator/richtek,rt5190a-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/richtek,rt5190a-regulator.yaml
@@ -58,7 +58,7 @@ properties:
type: object
$ref: regulator.yaml#
description: |
- regulator description for buck1 and buck4.
+ regulator description for buck1 to buck4, and ldo.
properties:
regulator-allowed-modes:
diff --git a/Documentation/devicetree/bindings/regulator/samsung,s2mpa01.yaml b/Documentation/devicetree/bindings/regulator/samsung,s2mpa01.yaml
index 0627dec513da..0f9eb317ba9a 100644
--- a/Documentation/devicetree/bindings/regulator/samsung,s2mpa01.yaml
+++ b/Documentation/devicetree/bindings/regulator/samsung,s2mpa01.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S2MPA01 Power Management IC regulators
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for S2M and S5M family of Power
diff --git a/Documentation/devicetree/bindings/regulator/samsung,s2mps11.yaml b/Documentation/devicetree/bindings/regulator/samsung,s2mps11.yaml
index e3b780715f44..f1c50dcd0b04 100644
--- a/Documentation/devicetree/bindings/regulator/samsung,s2mps11.yaml
+++ b/Documentation/devicetree/bindings/regulator/samsung,s2mps11.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S2MPS11 Power Management IC regulators
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for S2M and S5M family of Power
diff --git a/Documentation/devicetree/bindings/regulator/samsung,s2mps13.yaml b/Documentation/devicetree/bindings/regulator/samsung,s2mps13.yaml
index 579d77aefc3f..53b105a4ead1 100644
--- a/Documentation/devicetree/bindings/regulator/samsung,s2mps13.yaml
+++ b/Documentation/devicetree/bindings/regulator/samsung,s2mps13.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S2MPS13 Power Management IC regulators
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for S2M and S5M family of Power
diff --git a/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml b/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
index fdea290b3e94..01f9d4e236e9 100644
--- a/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
+++ b/Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S2MPS14 Power Management IC regulators
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for S2M and S5M family of Power
diff --git a/Documentation/devicetree/bindings/regulator/samsung,s2mps15.yaml b/Documentation/devicetree/bindings/regulator/samsung,s2mps15.yaml
index b3a883c94628..9576c2df45a6 100644
--- a/Documentation/devicetree/bindings/regulator/samsung,s2mps15.yaml
+++ b/Documentation/devicetree/bindings/regulator/samsung,s2mps15.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S2MPS15 Power Management IC regulators
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for S2M and S5M family of Power
diff --git a/Documentation/devicetree/bindings/regulator/samsung,s2mpu02.yaml b/Documentation/devicetree/bindings/regulator/samsung,s2mpu02.yaml
index 0ded6953e3b6..39b652c3c3c4 100644
--- a/Documentation/devicetree/bindings/regulator/samsung,s2mpu02.yaml
+++ b/Documentation/devicetree/bindings/regulator/samsung,s2mpu02.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S2MPU02 Power Management IC regulators
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for S2M and S5M family of Power
diff --git a/Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml b/Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml
index 3c1617b66861..172631ca3c25 100644
--- a/Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml
+++ b/Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S5M8767 Power Management IC regulators
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
This is a part of device tree bindings for S2M and S5M family of Power
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml
index 2424de733ee4..d99a729d2710 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml
@@ -104,8 +104,7 @@ properties:
qcom,smem-state-names:
$ref: /schemas/types.yaml#/definitions/string
description: The names of the state bits used for SMP2P output
- items:
- - const: stop
+ const: stop
glink-edge:
type: object
@@ -130,7 +129,6 @@ properties:
qcom,remote-pid:
$ref: /schemas/types.yaml#/definitions/uint32
description: ID of the shared memory used by GLINK for communication with WPSS
- maxItems: 1
required:
- interrupts
diff --git a/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.yaml b/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.yaml
index b0c41ab1a746..cdfcf32c53fa 100644
--- a/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.yaml
@@ -24,6 +24,11 @@ properties:
- const: hisilicon,hi3670-reset
- const: hisilicon,hi3660-reset
+ hisi,rst-syscon:
+ deprecated: true
+ description: phandle of the reset's syscon, use hisilicon,rst-syscon instead
+ $ref: /schemas/types.yaml#/definitions/phandle
+
hisilicon,rst-syscon:
description: phandle of the reset's syscon.
$ref: /schemas/types.yaml#/definitions/phandle
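A node using the preferred property spelling would look roughly like this (the syscon phandle is a placeholder, and the reset-cells value is only an assumption for this sketch):

    iomcu_rst: reset-controller {
        compatible = "hisilicon,hi3660-reset";
        hisilicon,rst-syscon = <&iomcu>;
        #reset-cells = <2>;
    };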
diff --git a/Documentation/devicetree/bindings/reset/socionext,uniphier-reset.yaml b/Documentation/devicetree/bindings/reset/socionext,uniphier-reset.yaml
index 377a7d242323..6566804ec567 100644
--- a/Documentation/devicetree/bindings/reset/socionext,uniphier-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/socionext,uniphier-reset.yaml
@@ -55,6 +55,9 @@ properties:
"#reset-cells":
const: 1
+ resets:
+ maxItems: 1
+
additionalProperties: false
required:
diff --git a/Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.yaml b/Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.yaml
index a50c34d5d199..765d9f9edd6e 100644
--- a/Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.yaml
+++ b/Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung Exynos SoC True Random Number Generator
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Łukasz Stelmach <l.stelmach@samsung.com>
properties:
diff --git a/Documentation/devicetree/bindings/rng/timeriomem_rng.yaml b/Documentation/devicetree/bindings/rng/timeriomem_rng.yaml
index 84bf518a5549..4754174e9849 100644
--- a/Documentation/devicetree/bindings/rng/timeriomem_rng.yaml
+++ b/Documentation/devicetree/bindings/rng/timeriomem_rng.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: TimerIO Random Number Generator
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/rtc/microchip,mfps-rtc.yaml b/Documentation/devicetree/bindings/rtc/microchip,mfps-rtc.yaml
index a2e984ea3553..500c62becd6b 100644
--- a/Documentation/devicetree/bindings/rtc/microchip,mfps-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/microchip,mfps-rtc.yaml
@@ -31,11 +31,19 @@ properties:
to that of the RTC's count register.
clocks:
- maxItems: 1
+ items:
+ - description: |
+ AHB clock
+ - description: |
+ Reference clock: divided by the prescaler to create a time-based
+ strobe (typically 1 Hz) for the calendar counter. By default, the RTC
+ on the PolarFire SoC shares its reference with MTIMER so this will
+ be a 1 MHz clock.
clock-names:
items:
- const: rtc
+ - const: rtcref
required:
- compatible
@@ -48,11 +56,12 @@ additionalProperties: false
examples:
- |
+ #include "dt-bindings/clock/microchip,mpfs-clock.h"
rtc@20124000 {
compatible = "microchip,mpfs-rtc";
reg = <0x20124000 0x1000>;
- clocks = <&clkcfg 21>;
- clock-names = "rtc";
+ clocks = <&clkcfg CLK_RTC>, <&clkcfg CLK_RTCREF>;
+ clock-names = "rtc", "rtcref";
interrupts = <80>, <81>;
};
...
diff --git a/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml b/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
index a98ed66d092e..0cabb773c397 100644
--- a/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
+++ b/Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
@@ -8,7 +8,7 @@ title: Samsung's Exynos USI (Universal Serial Interface) binding
maintainers:
- Sam Protsenko <semen.protsenko@linaro.org>
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
USI IP-core provides selectable serial protocol (UART, SPI or High-Speed I2C).
diff --git a/Documentation/devicetree/bindings/sound/samsung,arndale.yaml b/Documentation/devicetree/bindings/sound/samsung,arndale.yaml
index cea2bf3544f0..9bc4585bb6e5 100644
--- a/Documentation/devicetree/bindings/sound/samsung,arndale.yaml
+++ b/Documentation/devicetree/bindings/sound/samsung,arndale.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Insignal Arndale boards audio complex
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
properties:
diff --git a/Documentation/devicetree/bindings/sound/samsung,smdk5250.yaml b/Documentation/devicetree/bindings/sound/samsung,smdk5250.yaml
index cb51af90435e..ac151d3c1d77 100644
--- a/Documentation/devicetree/bindings/sound/samsung,smdk5250.yaml
+++ b/Documentation/devicetree/bindings/sound/samsung,smdk5250.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung SMDK5250 audio complex with WM8994 codec
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
properties:
diff --git a/Documentation/devicetree/bindings/sound/samsung,snow.yaml b/Documentation/devicetree/bindings/sound/samsung,snow.yaml
index 0c3b3302b842..51a83d3c7274 100644
--- a/Documentation/devicetree/bindings/sound/samsung,snow.yaml
+++ b/Documentation/devicetree/bindings/sound/samsung,snow.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Google Snow audio complex with MAX9809x codec
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
properties:
diff --git a/Documentation/devicetree/bindings/sound/samsung,tm2.yaml b/Documentation/devicetree/bindings/sound/samsung,tm2.yaml
index 74712d6f3ef4..491e08019c04 100644
--- a/Documentation/devicetree/bindings/sound/samsung,tm2.yaml
+++ b/Documentation/devicetree/bindings/sound/samsung,tm2.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung Exynos5433 TM2(E) audio complex with WM5110 codec
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
- Sylwester Nawrocki <s.nawrocki@samsung.com>
properties:
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-sai.yaml b/Documentation/devicetree/bindings/sound/st,stm32-sai.yaml
index b3dbcba33e41..fe2e15504ebc 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-sai.yaml
+++ b/Documentation/devicetree/bindings/sound/st,stm32-sai.yaml
@@ -136,8 +136,7 @@ allOf:
compatible:
contains:
const: st,stm32f4-sai
-
- - then:
+ then:
properties:
clocks:
items:
@@ -148,8 +147,7 @@ allOf:
items:
- const: x8k
- const: x11k
-
- - else:
+ else:
properties:
clocks:
items:
diff --git a/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml b/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
index b104899205f6..5de710adfa63 100644
--- a/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
+++ b/Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
@@ -124,7 +124,6 @@ properties:
description: |
Override the default TX fifo size. Unit is words. Ignored if 0.
$ref: /schemas/types.yaml#/definitions/uint32
- maxItems: 1
default: 64
renesas,rx-fifo-size:
@@ -132,7 +131,6 @@ properties:
description: |
Override the default RX fifo size. Unit is words. Ignored if 0.
$ref: /schemas/types.yaml#/definitions/uint32
- maxItems: 1
default: 64
required:
diff --git a/Documentation/devicetree/bindings/spi/samsung,spi-peripheral-props.yaml b/Documentation/devicetree/bindings/spi/samsung,spi-peripheral-props.yaml
index f0db3fb3d688..25b1b6c12d4d 100644
--- a/Documentation/devicetree/bindings/spi/samsung,spi-peripheral-props.yaml
+++ b/Documentation/devicetree/bindings/spi/samsung,spi-peripheral-props.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Peripheral-specific properties for Samsung S3C/S5P/Exynos SoC SPI controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description:
See spi-peripheral-props.yaml for more info.
diff --git a/Documentation/devicetree/bindings/spi/samsung,spi.yaml b/Documentation/devicetree/bindings/spi/samsung,spi.yaml
index bf9a76d931d2..a50f24f9359d 100644
--- a/Documentation/devicetree/bindings/spi/samsung,spi.yaml
+++ b/Documentation/devicetree/bindings/spi/samsung,spi.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S3C/S5P/Exynos SoC SPI controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description:
All the SPI controller nodes should be represented in the aliases node using
diff --git a/Documentation/devicetree/bindings/sram/sram.yaml b/Documentation/devicetree/bindings/sram/sram.yaml
index 668a9a41a775..993430be355b 100644
--- a/Documentation/devicetree/bindings/sram/sram.yaml
+++ b/Documentation/devicetree/bindings/sram/sram.yaml
@@ -136,14 +136,14 @@ required:
- reg
if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,rpm-msg-ram
- - rockchip,rk3288-pmu-sram
-
-else:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,rpm-msg-ram
+ - rockchip,rk3288-pmu-sram
+then:
required:
- "#address-cells"
- "#size-cells"
diff --git a/Documentation/devicetree/bindings/thermal/samsung,exynos-thermal.yaml b/Documentation/devicetree/bindings/thermal/samsung,exynos-thermal.yaml
index 17129f75d962..1344df708e2d 100644
--- a/Documentation/devicetree/bindings/thermal/samsung,exynos-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/samsung,exynos-thermal.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung Exynos SoC Thermal Management Unit (TMU)
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
description: |
For multi-instance tmu each instance should have an alias correctly numbered
diff --git a/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml b/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
index 22b91a27d776..6b9a3bcb3926 100644
--- a/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung Exynos SoC USB 3.0 DWC3 Controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/usb/samsung,exynos-usb2.yaml b/Documentation/devicetree/bindings/usb/samsung,exynos-usb2.yaml
index fbf07d6e707a..9c92defbba01 100644
--- a/Documentation/devicetree/bindings/usb/samsung,exynos-usb2.yaml
+++ b/Documentation/devicetree/bindings/usb/samsung,exynos-usb2.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung Exynos SoC USB 2.0 EHCI/OHCI Controller
maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Krzysztof Kozlowski <krzk@kernel.org>
properties:
compatible:
@@ -62,6 +62,7 @@ required:
- interrupts
- phys
- phy-names
+ - reg
allOf:
- if:
diff --git a/Documentation/filesystems/caching/backend-api.rst b/Documentation/filesystems/caching/backend-api.rst
index be793c49a772..d7507becf674 100644
--- a/Documentation/filesystems/caching/backend-api.rst
+++ b/Documentation/filesystems/caching/backend-api.rst
@@ -73,7 +73,7 @@ busy.
If successful, the cache backend can then start setting up the cache. In the
event that the initialisation fails, the cache backend should call::
- void fscache_relinquish_cookie(struct fscache_cache *cache);
+ void fscache_relinquish_cache(struct fscache_cache *cache);
to reset and discard the cookie.
@@ -110,9 +110,9 @@ to withdraw them, calling::
on the cookie that each object belongs to. This schedules the specified cookie
for withdrawal. This gets offloaded to a workqueue. The cache backend can
-test for completion by calling::
+wait for completion by calling::
- bool fscache_are_objects_withdrawn(struct fscache_cookie *cache);
+ void fscache_wait_for_objects(struct fscache_cache *cache);
Once all the cookies are withdrawn, a cache backend can withdraw all the
volumes, calling::
@@ -125,7 +125,7 @@ outstanding accesses on the volume to complete before returning.
When the cache is completely withdrawn, fscache should be notified by
calling::
- void fscache_cache_relinquish(struct fscache_cache *cache);
+ void fscache_relinquish_cache(struct fscache_cache *cache);
to clear fields in the cookie and discard the caller's ref on it.
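Taken together, a backend's withdrawal path might now be sketched as follows, where my_cache_withdraw_cookies() is a hypothetical driver-local helper standing in for the per-cookie withdrawal step described above::

    static void my_cache_withdraw(struct fscache_cache *cache)
    {
            my_cache_withdraw_cookies(cache);  /* schedule each cookie's withdrawal */
            fscache_wait_for_objects(cache);   /* wait for the workqueue to drain */
            /* ... withdraw the volumes ... */
            fscache_relinquish_cache(cache);   /* drop the ref on the cache */
    }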
diff --git a/Documentation/filesystems/caching/netfs-api.rst b/Documentation/filesystems/caching/netfs-api.rst
index 5066113acad5..7308d76a29dc 100644
--- a/Documentation/filesystems/caching/netfs-api.rst
+++ b/Documentation/filesystems/caching/netfs-api.rst
@@ -404,22 +404,21 @@ schedule a write of that region::
And if an error occurs before that point is reached, the marks can be removed
by calling::
- void fscache_clear_page_bits(struct fscache_cookie *cookie,
- struct address_space *mapping,
+ void fscache_clear_page_bits(struct address_space *mapping,
loff_t start, size_t len,
bool caching)
-In both of these functions, the cookie representing the cache object to be
-written to and a pointer to the mapping to which the source pages are attached
-are passed in; start and len indicate the size of the region that's going to be
-written (it doesn't have to align to page boundaries necessarily, but it does
-have to align to DIO boundaries on the backing filesystem). The caching
-parameter indicates if caching should be skipped, and if false, the functions
-do nothing.
-
-The write function takes some additional parameters: i_size indicates the size
-of the netfs file and term_func indicates an optional completion function, to
-which term_func_priv will be passed, along with the error or amount written.
+In these functions, a pointer to the mapping to which the source pages are
+attached is passed in and start and len indicate the size of the region that's
+going to be written (it doesn't necessarily have to align to page boundaries,
+but it does have to align to DIO boundaries on the backing filesystem). The
+caching parameter indicates if caching should be skipped, and if false, the
+functions do nothing.
+
+The write function takes some additional parameters: the cookie representing
+the cache object to be written to, i_size indicates the size of the netfs file
+and term_func indicates an optional completion function, to which
+term_func_priv will be passed, along with the error or amount written.
Note that the write function will always run asynchronously and will unmark all
the pages upon completion before calling term_func.
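So an error path between marking and scheduling the write reduces to one call, using the same start, len and caching values that were used when marking, e.g.::

    if (ret < 0)
            fscache_clear_page_bits(inode->i_mapping, start, len, caching);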
diff --git a/Documentation/filesystems/ext4/attributes.rst b/Documentation/filesystems/ext4/attributes.rst
index 54386a010a8d..871d2da7a0a9 100644
--- a/Documentation/filesystems/ext4/attributes.rst
+++ b/Documentation/filesystems/ext4/attributes.rst
@@ -76,7 +76,7 @@ The beginning of an extended attribute block is in
- Checksum of the extended attribute block.
* - 0x14
- \_\_u32
- - h\_reserved[2]
+ - h\_reserved[3]
- Zero.
The checksum is calculated against the FS UUID, the 64-bit block number
diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index 4a2426f0485a..ad8dc8c040a2 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -235,12 +235,6 @@ offgrpjquota Turn off group journalled quota.
offprjjquota Turn off project journalled quota.
quota Enable plain user disk quota accounting.
noquota Disable all plain disk quota option.
-whint_mode=%s Control which write hints are passed down to block
- layer. This supports "off", "user-based", and
- "fs-based". In "off" mode (default), f2fs does not pass
- down hints. In "user-based" mode, f2fs tries to pass
- down hints given by users. And in "fs-based" mode, f2fs
- passes down hints with its policy.
alloc_mode=%s Adjust block allocation policy, which supports "reuse"
and "default".
fsync_mode=%s Control the policy of fsync. Currently supports "posix",
@@ -751,70 +745,6 @@ In order to identify whether the data in the victim segment are valid or not,
F2FS manages a bitmap. Each bit represents the validity of a block, and the
bitmap is composed of a bit stream covering whole blocks in main area.
-Write-hint Policy
------------------
-
-1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
-
-2) whint_mode=user-based. F2FS tries to pass down hints given by
-users.
-
-===================== ======================== ===================
-User F2FS Block
-===================== ======================== ===================
-N/A META WRITE_LIFE_NOT_SET
-N/A HOT_NODE "
-N/A WARM_NODE "
-N/A COLD_NODE "
-ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
-extension list " "
-
--- buffered io
-WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
-WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
-WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
-WRITE_LIFE_NONE " "
-WRITE_LIFE_MEDIUM " "
-WRITE_LIFE_LONG " "
-
--- direct io
-WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
-WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
-WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
-WRITE_LIFE_NONE " WRITE_LIFE_NONE
-WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
-WRITE_LIFE_LONG " WRITE_LIFE_LONG
-===================== ======================== ===================
-
-3) whint_mode=fs-based. F2FS passes down hints with its policy.
-
-===================== ======================== ===================
-User F2FS Block
-===================== ======================== ===================
-N/A META WRITE_LIFE_MEDIUM;
-N/A HOT_NODE WRITE_LIFE_NOT_SET
-N/A WARM_NODE "
-N/A COLD_NODE WRITE_LIFE_NONE
-ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
-extension list " "
-
--- buffered io
-WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
-WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
-WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_LONG
-WRITE_LIFE_NONE " "
-WRITE_LIFE_MEDIUM " "
-WRITE_LIFE_LONG " "
-
--- direct io
-WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
-WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
-WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
-WRITE_LIFE_NONE " WRITE_LIFE_NONE
-WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
-WRITE_LIFE_LONG " WRITE_LIFE_LONG
-===================== ======================== ===================
-
Fallocate(2) Policy
-------------------
diff --git a/Documentation/gpu/amdgpu/amdgpu-glossary.rst b/Documentation/gpu/amdgpu/amdgpu-glossary.rst
index 859dcec6c6f9..db924d37f93e 100644
--- a/Documentation/gpu/amdgpu/amdgpu-glossary.rst
+++ b/Documentation/gpu/amdgpu/amdgpu-glossary.rst
@@ -8,12 +8,19 @@ we have a dedicated glossary for Display Core at
.. glossary::
+ active_cu_number
+ The number of CUs that are active on the system. The number of active
+ CUs may be less than SE * SH * CU depending on the board configuration.
+
CP
Command Processor
CPLIB
Content Protection Library
+ CU
+ Compute Unit
+
DFS
Digital Frequency Synthesizer
@@ -74,6 +81,12 @@ we have a dedicated glossary for Display Core at
SDMA
System DMA
+ SE
+ Shader Engine
+
+ SH
+ SHader array
+
SMU
System Management Unit
diff --git a/Documentation/gpu/drm-usage-stats.rst b/Documentation/gpu/drm-usage-stats.rst
new file mode 100644
index 000000000000..6c9f166a8d6f
--- /dev/null
+++ b/Documentation/gpu/drm-usage-stats.rst
@@ -0,0 +1,112 @@
+.. _drm-client-usage-stats:
+
+======================
+DRM client usage stats
+======================
+
+DRM drivers can choose to export partly standardised text output via
+`fops->show_fdinfo()`, as part of the driver-specific file operations in the
+`struct drm_driver` object registered with the DRM core.
+
+One purpose of this output is to enable writing of userspace monitoring tools
+that are as generic as practically feasible, akin to `top(1)`.
+
+Given the differences between various DRM drivers, the specification of the
+output is split between common and driver-specific parts. Having said that,
+wherever possible, effort should still be made to standardise as much as
+possible.
+
+File format specification
+=========================
+
+- File shall contain one key value pair per line of text.
+- Colon character (`:`) must be used to delimit keys and values.
+- All keys shall be prefixed with `drm-`.
+- Whitespace between the delimiter and first non-whitespace character shall be
+  ignored when parsing.
+- Neither keys nor values are allowed to contain whitespace characters.
+- Numerical key value pairs can end with an optional unit string.
+- Data type of the value is fixed as defined in the specification.
+
+Key types
+---------
+
+1. Mandatory, fully standardised.
+2. Optional, fully standardised.
+3. Driver specific.
+
+Data types
+----------
+
+- <uint> - Unsigned integer without a defined maximum value.
+- <str> - String excluding whitespace and any of the reserved characters
+  defined above.
+
+Mandatory fully standardised keys
+---------------------------------
+
+- drm-driver: <str>
+
+String shall contain the name this driver registered as via the respective
+`struct drm_driver` data structure.
+
+Optional fully standardised keys
+--------------------------------
+
+- drm-pdev: <aaaa:bb.cc.d>
+
+For PCI devices this should contain the PCI slot address of the device in
+question.
+
+- drm-client-id: <uint>
+
+Unique value relating to the open DRM file descriptor used to distinguish
+duplicated and shared file descriptors. Conceptually the value should map 1:1
+to the in kernel representation of `struct drm_file` instances.
+
+The value shall be either globally unique, or unique within the scope of each
+device, in which case `drm-pdev` shall be present as well.
+
+Userspace should use the criteria described above to associate data with
+individual clients and make sure not to double-account any usage statistics.
+
+- drm-engine-<str>: <uint> ns
+
+GPUs usually contain multiple execution engines. Each shall be given a stable
+and unique name (str), with possible values documented in the driver specific
+documentation.
+
+The value shall be the amount of time, in the specified unit, that the
+respective GPU engine has spent busy executing workloads belonging to this
+client.
+
+Values are not required to be constantly monotonic if that makes the driver
+implementation easier, but they are required to catch up with the previously
+reported larger value within a reasonable period. Upon observing a value lower
+than what was previously read, userspace is expected to stay with that larger
+previous value until a monotonic update is seen.
+
+- drm-engine-capacity-<str>: <uint>
+
+The engine identifier string must be the same as the one specified in the
+drm-engine-<str> tag, and the value shall be a number greater than zero in
+case the exported engine corresponds to a group of identical hardware engines.
+
+In the absence of this tag the parser shall assume a capacity of one. Zero
+capacity is not allowed.
+
+- drm-memory-<str>: <uint> [KiB|MiB]
+
+Each possible memory type which can be used to store buffer objects by the
+GPU in question shall be given a stable and unique name to be returned as the
+string here.
+
+The value shall reflect the amount of storage currently consumed by the buffer
+objects belonging to this client, in the respective memory region.
+
+Default unit shall be bytes with optional unit specifiers of 'KiB' or 'MiB'
+indicating kibi- or mebi-bytes.
+
+===============================
+Driver specific implementations
+===============================
+
+:ref:`i915-usage-stats`
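A minimal sketch of a driver emitting these keys from its fops->show_fdinfo()
implementation; my_client and my_client_from_file() stand in for hypothetical
driver state::

    #include <linux/seq_file.h>

    static void my_show_fdinfo(struct seq_file *m, struct file *f)
    {
            struct my_client *client = my_client_from_file(f);

            /* Mandatory key. */
            seq_printf(m, "drm-driver:\t%s\n", "my-driver");

            /* Optional fully standardised keys. */
            seq_printf(m, "drm-client-id:\t%llu\n", client->id);
            seq_printf(m, "drm-engine-render:\t%llu ns\n",
                       client->render_busy_ns);
    }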
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 0f08693d05cd..54060cd6c419 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -697,3 +697,31 @@ The style guide for ``i915_reg.h``.
.. kernel-doc:: drivers/gpu/drm/i915/i915_reg.h
:doc: The i915 register macro definition style guide
+
+.. _i915-usage-stats:
+
+i915 DRM client usage stats implementation
+==========================================
+
+The drm/i915 driver implements the DRM client usage stats specification as
+documented in :ref:`drm-client-usage-stats`.
+
+Example of the output showing the implemented key value pairs and the entirety
+of the currently possible format options:
+
+::
+
+ pos: 0
+ flags: 0100002
+ mnt_id: 21
+ drm-driver: i915
+ drm-pdev: 0000:00:02.0
+ drm-client-id: 7
+ drm-engine-render: 9288864723 ns
+ drm-engine-copy: 2035071108 ns
+ drm-engine-video: 0 ns
+ drm-engine-capacity-video: 2
+ drm-engine-video-enhance: 0 ns
+
+Possible `drm-engine-` key names are: `render`, `copy`, `video` and
+`video-enhance`.
diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst
index b9c1214d8f23..b99dede9a5b1 100644
--- a/Documentation/gpu/index.rst
+++ b/Documentation/gpu/index.rst
@@ -10,6 +10,7 @@ Linux GPU Driver Developer's Guide
drm-kms
drm-kms-helpers
drm-uapi
+ drm-usage-stats
driver-uapi
drm-client
drivers
diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst
index 525e6842dd33..43be3782e5df 100644
--- a/Documentation/networking/bonding.rst
+++ b/Documentation/networking/bonding.rst
@@ -894,7 +894,7 @@ xmit_hash_policy
Uses XOR of hardware MAC addresses and packet type ID
field to generate the hash. The formula is
- hash = source MAC XOR destination MAC XOR packet type ID
+ hash = source MAC[5] XOR destination MAC[5] XOR packet type ID
slave number = hash modulo slave count
This algorithm will place all traffic to a particular
@@ -910,7 +910,7 @@ xmit_hash_policy
Uses XOR of hardware MAC addresses and IP addresses to
generate the hash. The formula is
- hash = source MAC XOR destination MAC XOR packet type ID
+ hash = source MAC[5] XOR destination MAC[5] XOR packet type ID
hash = hash XOR source IP XOR destination IP
hash = hash XOR (hash RSHIFT 16)
hash = hash XOR (hash RSHIFT 8)
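A small C sketch of the corrected formulas, making explicit that only the last
octet (index 5) of each MAC address feeds the hash::

    /* layer2 policy */
    static unsigned int l2_hash(const unsigned char *smac,
                                const unsigned char *dmac,
                                unsigned int ptype, unsigned int slaves)
    {
            unsigned int hash = smac[5] ^ dmac[5] ^ ptype;

            return hash % slaves;           /* slave number */
    }

    /* layer2+3 policy additionally mixes in the IP addresses */
    static unsigned int l23_hash(const unsigned char *smac,
                                 const unsigned char *dmac,
                                 unsigned int ptype,
                                 unsigned int sip, unsigned int dip,
                                 unsigned int slaves)
    {
            unsigned int hash = smac[5] ^ dmac[5] ^ ptype;

            hash ^= sip ^ dip;
            hash ^= hash >> 16;
            hash ^= hash >> 8;
            return hash % slaves;
    }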
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index b0024aa7b051..66828293d9cb 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -267,6 +267,13 @@ ipfrag_max_dist - INTEGER
from different IP datagrams, which could result in data corruption.
Default: 64
+bc_forwarding - INTEGER
+ bc_forwarding enables the feature described in rfc1812#section-5.3.5.2
+	and rfc2644. It allows the router to forward directed broadcasts.
+ To enable this feature, the 'all' entry and the input interface entry
+ should be set to 1.
+ Default: 0
+
INET peer storage
=================
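A hedged userspace sketch of turning the feature on, setting both the 'all'
entry and the input interface entry ("eth0" is only an example name)::

    #include <fcntl.h>
    #include <unistd.h>

    static int sysctl_set_one(const char *path)
    {
            int fd = open(path, O_WRONLY);
            ssize_t n;

            if (fd < 0)
                    return -1;
            n = write(fd, "1", 1);
            close(fd);
            return n == 1 ? 0 : -1;
    }

    /* sysctl_set_one("/proc/sys/net/ipv4/conf/all/bc_forwarding");
     * sysctl_set_one("/proc/sys/net/ipv4/conf/eth0/bc_forwarding"); */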
diff --git a/Documentation/security/siphash.rst b/Documentation/security/siphash.rst
index bd9363025fcb..a10380cb78e5 100644
--- a/Documentation/security/siphash.rst
+++ b/Documentation/security/siphash.rst
@@ -121,26 +121,36 @@ even scarier, uses an easily brute-forcable 64-bit key (with a 32-bit output)
instead of SipHash's 128-bit key. However, this may appeal to some
high-performance `jhash` users.
-Danger!
-
-Do not ever use HalfSipHash except for as a hashtable key function, and only
-then when you can be absolutely certain that the outputs will never be
-transmitted out of the kernel. This is only remotely useful over `jhash` as a
-means of mitigating hashtable flooding denial of service attacks.
-
-Generating a HalfSipHash key
-============================
+HalfSipHash support is provided through the "hsiphash" family of functions.
+
+.. warning::
+ Do not ever use the hsiphash functions except for as a hashtable key
+ function, and only then when you can be absolutely certain that the outputs
+ will never be transmitted out of the kernel. This is only remotely useful
+ over `jhash` as a means of mitigating hashtable flooding denial of service
+ attacks.
+
+On 64-bit kernels, the hsiphash functions actually implement SipHash-1-3, a
+reduced-round variant of SipHash, instead of HalfSipHash-1-3. This is because in
+64-bit code, SipHash-1-3 is no slower than HalfSipHash-1-3, and can be faster.
+Note, this does *not* mean that in 64-bit kernels the hsiphash functions are the
+same as the siphash ones, or that they are secure; the hsiphash functions still
+use a less secure reduced-round algorithm and truncate their outputs to 32
+bits.
+
+Generating a hsiphash key
+=========================
Keys should always be generated from a cryptographically secure source of
-random numbers, either using get_random_bytes or get_random_once:
+random numbers, either using get_random_bytes or get_random_once::
-hsiphash_key_t key;
-get_random_bytes(&key, sizeof(key));
+ hsiphash_key_t key;
+ get_random_bytes(&key, sizeof(key));
If you're not deriving your key from here, you're doing it wrong.
-Using the HalfSipHash functions
-===============================
+Using the hsiphash functions
+============================
There are two variants of the function, one that takes a list of integers, and
one that takes a buffer::
@@ -183,7 +193,7 @@ You may then iterate like usual over the returned hash bucket.
Performance
===========
-HalfSipHash is roughly 3 times slower than JenkinsHash. For many replacements,
-this will not be a problem, as the hashtable lookup isn't the bottleneck. And
-in general, this is probably a good sacrifice to make for the security and DoS
-resistance of HalfSipHash.
+hsiphash() is roughly 3 times slower than jhash(). For many replacements, this
+will not be a problem, as the hashtable lookup isn't the bottleneck. And in
+general, this is probably a good sacrifice to make for the security and DoS
+resistance of hsiphash().
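A minimal sketch of hashtable bucket selection with the buffer variant; the
flow-key struct and table size are illustrative only::

    #include <linux/random.h>
    #include <linux/siphash.h>

    #define MY_HT_BITS 10

    static hsiphash_key_t my_ht_key;

    struct my_flow {
            u32 saddr;
            u32 daddr;
    };

    static u32 my_bucket(const struct my_flow *f)
    {
            get_random_once(&my_ht_key, sizeof(my_ht_key));

            /* hsiphash_2u32(f->saddr, f->daddr, &my_ht_key) would be the
             * equivalent integer-list variant. */
            return hsiphash(f, sizeof(*f), &my_ht_key) &
                   ((1U << MY_HT_BITS) - 1);
    }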
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index d13fa6600467..4a900cdbc62e 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -5986,16 +5986,16 @@ should put the acknowledged interrupt vector into the 'epr' field.
#define KVM_SYSTEM_EVENT_RESET 2
#define KVM_SYSTEM_EVENT_CRASH 3
__u32 type;
- __u64 flags;
+ __u32 ndata;
+ __u64 data[16];
} system_event;
If exit_reason is KVM_EXIT_SYSTEM_EVENT then the vcpu has triggered
a system-level event using some architecture specific mechanism (hypercall
or some special instruction). In case of ARM64, this is triggered using
-HVC instruction based PSCI call from the vcpu. The 'type' field describes
-the system-level event type. The 'flags' field describes architecture
-specific flags for the system-level event.
+HVC instruction based PSCI call from the vcpu.
+The 'type' field describes the system-level event type.
Valid values for 'type' are:
- KVM_SYSTEM_EVENT_SHUTDOWN -- the guest has requested a shutdown of the
@@ -6010,10 +6010,20 @@ Valid values for 'type' are:
to ignore the request, or to gather VM memory core dump and/or
reset/shutdown of the VM.
-Valid flags are:
+If KVM_CAP_SYSTEM_EVENT_DATA is present, the 'data' field can contain
+architecture specific information for the system-level event. Only
+the first `ndata` items (possibly zero) of the data array are valid.
- - KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2 (arm64 only) -- the guest issued
- a SYSTEM_RESET2 call according to v1.1 of the PSCI specification.
+ - for arm64, data[0] is set to KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2 if
+ the guest issued a SYSTEM_RESET2 call according to v1.1 of the PSCI
+ specification.
+
+ - for RISC-V, data[0] is set to the value of the second argument of the
+ ``sbi_system_reset`` call.
+
+Previous versions of Linux defined a `flags` member in this struct. The
+field is now aliased to `data[0]`. Userspace can assume that it is only
+written if ndata is greater than 0.
::
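A hedged userspace sketch of consuming the new layout, assuming 'run' points at
the mmap'ed vcpu kvm_run area and KVM_CAP_SYSTEM_EVENT_DATA was reported::

    #include <stdio.h>
    #include <linux/kvm.h>

    static void handle_system_event(struct kvm_run *run)
    {
            __u32 i;

            if (run->exit_reason != KVM_EXIT_SYSTEM_EVENT)
                    return;

            /* Only the first ndata entries are valid (possibly zero). */
            for (i = 0; i < run->system_event.ndata; i++)
                    printf("event %u data[%u] = %llu\n",
                           run->system_event.type, i,
                           (unsigned long long)run->system_event.data[i]);
    }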
@@ -6190,6 +6200,7 @@ Valid values for 'type' are:
unsigned long args[6];
unsigned long ret[2];
} riscv_sbi;
+
If exit reason is KVM_EXIT_RISCV_SBI then it indicates that the VCPU has
done a SBI call which is not handled by KVM RISC-V kernel module. The details
of the SBI call are available in 'riscv_sbi' member of kvm_run structure. The
diff --git a/Documentation/virt/kvm/vcpu-requests.rst b/Documentation/virt/kvm/vcpu-requests.rst
index db43ee571f5a..31f62b64e07b 100644
--- a/Documentation/virt/kvm/vcpu-requests.rst
+++ b/Documentation/virt/kvm/vcpu-requests.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
=================
KVM VCPU Requests
=================
diff --git a/Documentation/virt/kvm/x86/amd-memory-encryption.rst b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
index 1c6847fff304..2d307811978c 100644
--- a/Documentation/virt/kvm/x86/amd-memory-encryption.rst
+++ b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
======================================
Secure Encrypted Virtualization (SEV)
======================================
diff --git a/Documentation/virt/kvm/x86/errata.rst b/Documentation/virt/kvm/x86/errata.rst
index 806f049b6975..410e0aa63493 100644
--- a/Documentation/virt/kvm/x86/errata.rst
+++ b/Documentation/virt/kvm/x86/errata.rst
@@ -1,3 +1,4 @@
+.. SPDX-License-Identifier: GPL-2.0
=======================================
Known limitations of CPU virtualization
@@ -36,4 +37,3 @@ Nested virtualization features
------------------------------
TBD
-
diff --git a/Documentation/virt/kvm/x86/running-nested-guests.rst b/Documentation/virt/kvm/x86/running-nested-guests.rst
index bd70c69468ae..a27e6768d900 100644
--- a/Documentation/virt/kvm/x86/running-nested-guests.rst
+++ b/Documentation/virt/kvm/x86/running-nested-guests.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
==============================
Running nested guests with KVM
==============================
diff --git a/Documentation/vm/page_owner.rst b/Documentation/vm/page_owner.rst
index 65204d7f004f..7e0c3f574e78 100644
--- a/Documentation/vm/page_owner.rst
+++ b/Documentation/vm/page_owner.rst
@@ -110,7 +110,7 @@ Usage
If you want to sort by the page nums of buf, use the ``-m`` parameter.
The detailed parameters are:
- fundamental function:
+ fundamental function::
Sort:
-a Sort by memory allocation time.
@@ -122,7 +122,7 @@ Usage
-s Sort by stack trace.
-t Sort by times (default).
- additional function:
+ additional function::
Cull:
--cull <rules>
@@ -153,6 +153,7 @@ Usage
STANDARD FORMAT SPECIFIERS
==========================
+::
KEY LONG DESCRIPTION
p pid process ID
diff --git a/MAINTAINERS b/MAINTAINERS
index 9b2b0dc44506..1a04950c1a8f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -201,6 +201,7 @@ F: include/net/ieee80211_radiotap.h
F: include/net/iw_handler.h
F: include/net/wext.h
F: include/uapi/linux/nl80211.h
+F: include/uapi/linux/wireless.h
F: net/wireless/
8169 10/100/1000 GIGABIT ETHERNET DRIVER
@@ -2636,13 +2637,14 @@ F: sound/soc/rockchip/
N: rockchip
ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES
-M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org
S: Maintained
C: irc://irc.libera.chat/linux-exynos
Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
+B: mailto:linux-samsung-soc@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux.git
F: Documentation/arm/samsung/
F: Documentation/devicetree/bindings/arm/samsung/
@@ -3742,7 +3744,7 @@ F: include/linux/platform_data/b53.h
BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE
M: Nicolas Saenz Julienne <nsaenz@kernel.org>
-L: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -3757,7 +3759,7 @@ BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
M: Florian Fainelli <f.fainelli@gmail.com>
M: Ray Jui <rjui@broadcom.com>
M: Scott Branden <sbranden@broadcom.com>
-M: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
S: Maintained
T: git git://github.com/broadcom/mach-bcm
F: arch/arm/mach-bcm/
@@ -3777,7 +3779,7 @@ F: arch/mips/include/asm/mach-bcm47xx/*
BROADCOM BCM4908 ETHERNET DRIVER
M: Rafał Miłecki <rafal@milecki.pl>
-M: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
@@ -3786,7 +3788,7 @@ F: drivers/net/ethernet/broadcom/unimac.h
BROADCOM BCM4908 PINMUX DRIVER
M: Rafał Miłecki <rafal@milecki.pl>
-M: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-gpio@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pinctrl/brcm,bcm4908-pinctrl.yaml
@@ -3796,7 +3798,7 @@ BROADCOM BCM5301X ARM ARCHITECTURE
M: Florian Fainelli <f.fainelli@gmail.com>
M: Hauke Mehrtens <hauke@hauke-m.de>
M: Rafał Miłecki <zajec5@gmail.com>
-M: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/boot/dts/bcm470*
@@ -3807,7 +3809,7 @@ F: arch/arm/mach-bcm/bcm_5301x.c
BROADCOM BCM53573 ARM ARCHITECTURE
M: Florian Fainelli <f.fainelli@gmail.com>
M: Rafał Miłecki <rafal@milecki.pl>
-L: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/boot/dts/bcm47189*
@@ -3815,7 +3817,7 @@ F: arch/arm/boot/dts/bcm53573*
BROADCOM BCM63XX ARM ARCHITECTURE
M: Florian Fainelli <f.fainelli@gmail.com>
-M: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://github.com/broadcom/stblinux.git
@@ -3829,7 +3831,7 @@ F: drivers/usb/gadget/udc/bcm63xx_udc.*
BROADCOM BCM7XXX ARM ARCHITECTURE
M: Florian Fainelli <f.fainelli@gmail.com>
-M: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://github.com/broadcom/stblinux.git
@@ -3847,21 +3849,21 @@ N: bcm7120
BROADCOM BDC DRIVER
M: Al Cooper <alcooperx@gmail.com>
L: linux-usb@vger.kernel.org
-L: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
S: Maintained
F: Documentation/devicetree/bindings/usb/brcm,bdc.yaml
F: drivers/usb/gadget/udc/bdc/
BROADCOM BMIPS CPUFREQ DRIVER
M: Markus Mayer <mmayer@broadcom.com>
-M: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-pm@vger.kernel.org
S: Maintained
F: drivers/cpufreq/bmips-cpufreq.c
BROADCOM BMIPS MIPS ARCHITECTURE
M: Florian Fainelli <f.fainelli@gmail.com>
-L: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-mips@vger.kernel.org
S: Maintained
T: git git://github.com/broadcom/stblinux.git
@@ -3912,7 +3914,9 @@ BROADCOM BNXT_EN 50 GIGABIT ETHERNET DRIVER
M: Michael Chan <michael.chan@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
+F: drivers/firmware/broadcom/tee_bnxt_fw.c
F: drivers/net/ethernet/broadcom/bnxt/
+F: include/linux/firmware/broadcom/tee_bnxt_fw.h
BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
M: Arend van Spriel <aspriel@gmail.com>
@@ -3927,53 +3931,53 @@ F: drivers/net/wireless/broadcom/brcm80211/
BROADCOM BRCMSTB GPIO DRIVER
M: Doug Berger <opendmb@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
-L: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
S: Supported
F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml
F: drivers/gpio/gpio-brcmstb.c
BROADCOM BRCMSTB I2C DRIVER
M: Kamal Dasu <kdasu.kdev@gmail.com>
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-i2c@vger.kernel.org
-L: bcm-kernel-feedback-list@broadcom.com
S: Supported
F: Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml
F: drivers/i2c/busses/i2c-brcmstb.c
BROADCOM BRCMSTB UART DRIVER
M: Al Cooper <alcooperx@gmail.com>
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-serial@vger.kernel.org
-L: bcm-kernel-feedback-list@broadcom.com
S: Maintained
F: Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
F: drivers/tty/serial/8250/8250_bcm7271.c
BROADCOM BRCMSTB USB EHCI DRIVER
M: Al Cooper <alcooperx@gmail.com>
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-usb@vger.kernel.org
-L: bcm-kernel-feedback-list@broadcom.com
S: Maintained
F: Documentation/devicetree/bindings/usb/brcm,bcm7445-ehci.yaml
F: drivers/usb/host/ehci-brcm.*
BROADCOM BRCMSTB USB PIN MAP DRIVER
M: Al Cooper <alcooperx@gmail.com>
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-usb@vger.kernel.org
-L: bcm-kernel-feedback-list@broadcom.com
S: Maintained
F: Documentation/devicetree/bindings/usb/brcm,usb-pinmap.yaml
F: drivers/usb/misc/brcmstb-usb-pinmap.c
BROADCOM BRCMSTB USB2 and USB3 PHY DRIVER
M: Al Cooper <alcooperx@gmail.com>
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-kernel@vger.kernel.org
-L: bcm-kernel-feedback-list@broadcom.com
S: Maintained
F: drivers/phy/broadcom/phy-brcm-usb*
BROADCOM ETHERNET PHY DRIVERS
M: Florian Fainelli <f.fainelli@gmail.com>
-L: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt
@@ -3984,7 +3988,7 @@ F: include/linux/brcmphy.h
BROADCOM GENET ETHERNET DRIVER
M: Doug Berger <opendmb@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
-L: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/net/brcm,bcmgenet.yaml
@@ -3998,7 +4002,7 @@ F: include/linux/platform_data/mdio-bcm-unimac.h
BROADCOM IPROC ARM ARCHITECTURE
M: Ray Jui <rjui@broadcom.com>
M: Scott Branden <sbranden@broadcom.com>
-M: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://github.com/broadcom/stblinux.git
@@ -4026,7 +4030,7 @@ N: stingray
BROADCOM IPROC GBIT ETHERNET DRIVER
M: Rafał Miłecki <rafal@milecki.pl>
-M: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/brcm,amac.yaml
@@ -4035,7 +4039,7 @@ F: drivers/net/ethernet/broadcom/unimac.h
BROADCOM KONA GPIO DRIVER
M: Ray Jui <rjui@broadcom.com>
-L: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
S: Supported
F: Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt
F: drivers/gpio/gpio-bcm-kona.c
@@ -4068,7 +4072,7 @@ F: drivers/firmware/broadcom/*
BROADCOM PMB (POWER MANAGEMENT BUS) DRIVER
M: Rafał Miłecki <rafal@milecki.pl>
M: Florian Fainelli <f.fainelli@gmail.com>
-M: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://github.com/broadcom/stblinux.git
@@ -4084,7 +4088,7 @@ F: include/linux/bcma/
BROADCOM SPI DRIVER
M: Kamal Dasu <kdasu.kdev@gmail.com>
-M: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
S: Maintained
F: Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.yaml
F: drivers/spi/spi-bcm-qspi.*
@@ -4093,7 +4097,7 @@ F: drivers/spi/spi-iproc-qspi.c
BROADCOM STB AVS CPUFREQ DRIVER
M: Markus Mayer <mmayer@broadcom.com>
-M: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-pm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
@@ -4101,7 +4105,7 @@ F: drivers/cpufreq/brcmstb*
BROADCOM STB AVS TMON DRIVER
M: Markus Mayer <mmayer@broadcom.com>
-M: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-pm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml
@@ -4109,7 +4113,7 @@ F: drivers/thermal/broadcom/brcmstb*
BROADCOM STB DPFE DRIVER
M: Markus Mayer <mmayer@broadcom.com>
-M: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.yaml
@@ -4118,8 +4122,8 @@ F: drivers/memory/brcmstb_dpfe.c
BROADCOM STB NAND FLASH DRIVER
M: Brian Norris <computersforpeace@gmail.com>
M: Kamal Dasu <kdasu.kdev@gmail.com>
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-mtd@lists.infradead.org
-L: bcm-kernel-feedback-list@broadcom.com
S: Maintained
F: drivers/mtd/nand/raw/brcmnand/
F: include/linux/platform_data/brcmnand.h
@@ -4128,7 +4132,7 @@ BROADCOM STB PCIE DRIVER
M: Jim Quinlan <jim2101024@gmail.com>
M: Nicolas Saenz Julienne <nsaenz@kernel.org>
M: Florian Fainelli <f.fainelli@gmail.com>
-M: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
@@ -4136,7 +4140,7 @@ F: drivers/pci/controller/pcie-brcmstb.c
BROADCOM SYSTEMPORT ETHERNET DRIVER
M: Florian Fainelli <f.fainelli@gmail.com>
-L: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bcmsysport.*
@@ -4153,7 +4157,7 @@ F: drivers/net/ethernet/broadcom/tg3.*
BROADCOM VK DRIVER
M: Scott Branden <scott.branden@broadcom.com>
-L: bcm-kernel-feedback-list@broadcom.com
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
S: Supported
F: drivers/misc/bcm-vk/
F: include/uapi/linux/misc/bcm_vk.h
@@ -9347,14 +9351,12 @@ F: drivers/pci/hotplug/rpaphp*
IBM Power SRIOV Virtual NIC Device Driver
M: Dany Madden <drt@linux.ibm.com>
-M: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
R: Thomas Falcon <tlfalcon@linux.ibm.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmvnic.*
IBM Power Virtual Accelerator Switchboard
-M: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
F: arch/powerpc/include/asm/vas.h
@@ -9996,6 +9998,7 @@ S: Supported
F: Documentation/driver-api/mei/*
F: drivers/misc/mei/
F: drivers/watchdog/mei_wdt.c
+F: include/linux/mei_aux.h
F: include/linux/mei_cl_bus.h
F: include/uapi/linux/mei.h
F: samples/mei/*
@@ -10249,8 +10252,6 @@ F: drivers/net/ethernet/sgi/ioc3-eth.c
IOMAP FILESYSTEM LIBRARY
M: Christoph Hellwig <hch@infradead.org>
M: Darrick J. Wong <djwong@kernel.org>
-M: linux-xfs@vger.kernel.org
-M: linux-fsdevel@vger.kernel.org
L: linux-xfs@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
S: Supported
@@ -10381,6 +10382,7 @@ F: include/linux/isapnp.h
ISCSI
M: Lee Duncan <lduncan@suse.com>
M: Chris Leech <cleech@redhat.com>
+M: Mike Christie <michael.christie@oracle.com>
L: open-iscsi@googlegroups.com
L: linux-scsi@vger.kernel.org
S: Maintained
@@ -10558,6 +10560,7 @@ M: Andrey Ryabinin <ryabinin.a.a@gmail.com>
R: Alexander Potapenko <glider@google.com>
R: Andrey Konovalov <andreyknvl@gmail.com>
R: Dmitry Vyukov <dvyukov@google.com>
+R: Vincenzo Frascino <vincenzo.frascino@arm.com>
L: kasan-dev@googlegroups.com
S: Maintained
F: Documentation/dev-tools/kasan.rst
@@ -11917,7 +11920,7 @@ F: drivers/iio/proximity/mb1232.c
MAXIM MAX17040 FAMILY FUEL GAUGE DRIVERS
R: Iskren Chernev <iskren.chernev@gmail.com>
-R: Krzysztof Kozlowski <krzk@kernel.org>
+R: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
R: Marek Szyprowski <m.szyprowski@samsung.com>
R: Matheus Castello <matheus@castello.eng.br>
L: linux-pm@vger.kernel.org
@@ -11927,7 +11930,7 @@ F: drivers/power/supply/max17040_battery.c
MAXIM MAX17042 FAMILY FUEL GAUGE DRIVERS
R: Hans de Goede <hdegoede@redhat.com>
-R: Krzysztof Kozlowski <krzk@kernel.org>
+R: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
R: Marek Szyprowski <m.szyprowski@samsung.com>
R: Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>
R: Purism Kernel Team <kernel@puri.sm>
@@ -11979,10 +11982,11 @@ F: Documentation/devicetree/bindings/power/supply/maxim,max77976.yaml
F: drivers/power/supply/max77976_charger.c
MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
-M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
L: linux-pm@vger.kernel.org
S: Supported
+B: mailto:linux-samsung-soc@vger.kernel.org
F: Documentation/devicetree/bindings/power/supply/maxim,max14577.yaml
F: Documentation/devicetree/bindings/power/supply/maxim,max77693.yaml
F: drivers/power/supply/max14577_charger.c
@@ -11990,10 +11994,11 @@ F: drivers/power/supply/max77693_charger.c
MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
M: Chanwoo Choi <cw00.choi@samsung.com>
-M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
L: linux-kernel@vger.kernel.org
S: Supported
+B: mailto:linux-samsung-soc@vger.kernel.org
F: Documentation/devicetree/bindings/*/maxim,max14577.yaml
F: Documentation/devicetree/bindings/*/maxim,max77686.yaml
F: Documentation/devicetree/bindings/*/maxim,max77693.yaml
@@ -12413,7 +12418,7 @@ F: drivers/mmc/host/mtk-sd.c
MEDIATEK MT76 WIRELESS LAN DRIVER
M: Felix Fietkau <nbd@nbd.name>
-M: Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+M: Lorenzo Bianconi <lorenzo@kernel.org>
M: Ryder Lee <ryder.lee@mediatek.com>
R: Shayne Chen <shayne.chen@mediatek.com>
R: Sean Wang <sean.wang@mediatek.com>
@@ -12684,9 +12689,10 @@ F: mm/memblock.c
F: tools/testing/memblock/
MEMORY CONTROLLER DRIVERS
-M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
L: linux-kernel@vger.kernel.org
S: Maintained
+B: mailto:krzysztof.kozlowski@linaro.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-mem-ctrl.git
F: Documentation/devicetree/bindings/memory-controllers/
F: drivers/memory/
@@ -13635,6 +13641,7 @@ F: net/core/drop_monitor.c
NETWORKING DRIVERS
M: "David S. Miller" <davem@davemloft.net>
+M: Eric Dumazet <edumazet@google.com>
M: Jakub Kicinski <kuba@kernel.org>
M: Paolo Abeni <pabeni@redhat.com>
L: netdev@vger.kernel.org
@@ -13682,6 +13689,7 @@ F: tools/testing/selftests/drivers/net/dsa/
NETWORKING [GENERAL]
M: "David S. Miller" <davem@davemloft.net>
+M: Eric Dumazet <edumazet@google.com>
M: Jakub Kicinski <kuba@kernel.org>
M: Paolo Abeni <pabeni@redhat.com>
L: netdev@vger.kernel.org
@@ -13828,10 +13836,11 @@ F: include/uapi/linux/nexthop.h
F: net/ipv4/nexthop.c
NFC SUBSYSTEM
-M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
L: linux-nfc@lists.01.org (subscribers-only)
L: netdev@vger.kernel.org
S: Maintained
+B: mailto:linux-nfc@lists.01.org
F: Documentation/devicetree/bindings/net/nfc/
F: drivers/nfc/
F: include/linux/platform_data/nfcmrvl.h
@@ -14145,7 +14154,7 @@ F: Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
F: drivers/regulator/pf8x00-regulator.c
NXP PTN5150A CC LOGIC AND EXTCON DRIVER
-M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
@@ -14698,7 +14707,7 @@ F: scripts/dtc/
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <robh+dt@kernel.org>
-M: Krzysztof Kozlowski <krzk+dt@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski+dt@linaro.org>
L: devicetree@vger.kernel.org
S: Maintained
C: irc://irc.libera.chat/devicetree
@@ -15610,7 +15619,7 @@ F: drivers/pinctrl/renesas/
PIN CONTROLLER - SAMSUNG
M: Tomasz Figa <tomasz.figa@gmail.com>
-M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -15618,6 +15627,7 @@ L: linux-samsung-soc@vger.kernel.org
S: Maintained
C: irc://irc.libera.chat/linux-exynos
Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
+B: mailto:linux-samsung-soc@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/samsung.git
F: Documentation/devicetree/bindings/pinctrl/samsung,pinctrl*yaml
F: drivers/pinctrl/samsung/
@@ -16448,6 +16458,7 @@ S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
B: https://gitlab.freedesktop.org/drm/amd/-/issues
C: irc://irc.oftc.net/radeon
+F: Documentation/gpu/amdgpu/
F: drivers/gpu/drm/amd/
F: drivers/gpu/drm/radeon/
F: include/uapi/drm/amdgpu_drm.h
@@ -17285,7 +17296,7 @@ W: http://www.ibm.com/developerworks/linux/linux390/
F: drivers/s390/scsi/zfcp_*
S3C ADC BATTERY DRIVER
-M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
L: linux-samsung-soc@vger.kernel.org
S: Odd Fixes
F: drivers/power/supply/s3c_adc_battery.c
@@ -17330,15 +17341,16 @@ F: Documentation/admin-guide/LSM/SafeSetID.rst
F: security/safesetid/
SAMSUNG AUDIO (ASoC) DRIVERS
-M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
+B: mailto:linux-samsung-soc@vger.kernel.org
F: Documentation/devicetree/bindings/sound/samsung*
F: sound/soc/samsung/
SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
-M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
@@ -17373,11 +17385,12 @@ S: Maintained
F: drivers/platform/x86/samsung-laptop.c
SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
-M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
L: linux-kernel@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Supported
+B: mailto:linux-samsung-soc@vger.kernel.org
F: Documentation/devicetree/bindings/clock/samsung,s2mps11.yaml
F: Documentation/devicetree/bindings/mfd/samsung,s2m*.yaml
F: Documentation/devicetree/bindings/mfd/samsung,s5m*.yaml
@@ -17399,7 +17412,7 @@ F: drivers/media/platform/samsung/s3c-camif/
F: include/media/drv-intf/s3c_camif.h
SAMSUNG S3FWRN5 NFC DRIVER
-M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Krzysztof Opasiak <k.opasiak@samsung.com>
L: linux-nfc@lists.01.org (subscribers-only)
S: Maintained
@@ -17421,7 +17434,7 @@ S: Supported
F: drivers/media/i2c/s5k5baf.c
SAMSUNG S5P Security SubSystem (SSS) DRIVER
-M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Vladimir Zapolskiy <vz@mleia.com>
L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
@@ -17456,7 +17469,7 @@ F: include/linux/clk/samsung.h
F: include/linux/platform_data/clk-s3c2410.h
SAMSUNG SPI DRIVERS
-M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
M: Andi Shyti <andi@etezian.org>
L: linux-spi@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
@@ -17474,7 +17487,7 @@ F: drivers/net/ethernet/samsung/sxgbe/
SAMSUNG THERMAL DRIVER
M: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
-M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
L: linux-pm@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
@@ -17658,8 +17671,8 @@ K: \bTIF_SECCOMP\b
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) Broadcom BRCMSTB DRIVER
M: Al Cooper <alcooperx@gmail.com>
+R: Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
L: linux-mmc@vger.kernel.org
-L: bcm-kernel-feedback-list@broadcom.com
S: Maintained
F: drivers/mmc/host/sdhci-brcmstb*
@@ -21238,10 +21251,8 @@ S: Maintained
F: drivers/hid/hid-wiimote*
WILOCITY WIL6210 WIRELESS DRIVER
-M: Maya Erez <merez@codeaurora.org>
L: linux-wireless@vger.kernel.org
-L: wil6210@qti.qualcomm.com
-S: Supported
+S: Orphan
W: https://wireless.wiki.kernel.org/en/users/Drivers/wil6210
F: drivers/net/wireless/ath/wil6210/
@@ -21445,6 +21456,15 @@ F: arch/x86/include/asm/uv/
F: arch/x86/kernel/apic/x2apic_uv_x.c
F: arch/x86/platform/uv/
+X86 STACK UNWINDING
+M: Josh Poimboeuf <jpoimboe@redhat.com>
+M: Peter Zijlstra <peterz@infradead.org>
+S: Supported
+F: arch/x86/include/asm/unwind*.h
+F: arch/x86/kernel/dumpstack.c
+F: arch/x86/kernel/stacktrace.c
+F: arch/x86/kernel/unwind_*.c
+
X86 VDSO
M: Andy Lutomirski <luto@kernel.org>
L: linux-kernel@vger.kernel.org
@@ -21607,7 +21627,6 @@ F: drivers/xen/*swiotlb*
XFS FILESYSTEM
C: irc://irc.oftc.net/xfs
M: Darrick J. Wong <djwong@kernel.org>
-M: linux-xfs@vger.kernel.org
L: linux-xfs@vger.kernel.org
S: Supported
W: http://xfs.org/
diff --git a/Makefile b/Makefile
index 29e273d3f8cc..9a820c525b86 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 18
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc5
NAME = Superb Owl
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 29b0167c088b..31c4fdc4a4ba 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -854,10 +854,8 @@ config HAVE_ARCH_HUGE_VMAP
#
# Archs that select this would be capable of PMD-sized vmaps (i.e.,
-# arch_vmap_pmd_supported() returns true), and they must make no assumptions
-# that vmalloc memory is mapped with PAGE_SIZE ptes. The VM_NO_HUGE_VMAP flag
-# can be used to prohibit arch-specific allocations from using hugepages to
-# help with this (e.g., modules may require it).
+# arch_vmap_pmd_supported() returns true). The VM_ALLOW_HUGE_VMAP flag
+# must be used to enable allocations to use hugepages.
#
config HAVE_ARCH_HUGE_VMALLOC
depends on HAVE_ARCH_HUGE_VMAP
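A hedged sketch of the opt-in described in the comment above, via the vm_flags
argument of __vmalloc_node_range(); the size, alignment and flags here are
illustrative only::

    #include <linux/vmalloc.h>

    /* Opt this one allocation in to huge mappings; everything else now
     * defaults to PAGE_SIZE ptes. */
    void *buf = __vmalloc_node_range(SZ_16M, PMD_SIZE, VMALLOC_START,
                                     VMALLOC_END, GFP_KERNEL, PAGE_KERNEL,
                                     VM_ALLOW_HUGE_VMAP, NUMA_NO_NODE,
                                     __builtin_return_address(0));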
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index dcaa44e408ac..f48ba03e9b5e 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -275,7 +275,7 @@
cs-gpios = <&creg_gpio 0 GPIO_ACTIVE_LOW>,
<&creg_gpio 1 GPIO_ACTIVE_LOW>;
- spi-flash@0 {
+ flash@0 {
compatible = "sst26wf016b", "jedec,spi-nor";
reg = <0>;
#address-cells = <1>;
diff --git a/arch/arc/include/asm/atomic-llsc.h b/arch/arc/include/asm/atomic-llsc.h
index 088d348781c1..1b0ffaeee16d 100644
--- a/arch/arc/include/asm/atomic-llsc.h
+++ b/arch/arc/include/asm/atomic-llsc.h
@@ -5,7 +5,7 @@
#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#define ATOMIC_OP(op, c_op, asm_op) \
+#define ATOMIC_OP(op, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned int val; \
@@ -21,7 +21,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v) \
: "cc"); \
} \
-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
unsigned int val; \
@@ -42,7 +42,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
-#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+#define ATOMIC_FETCH_OP(op, asm_op) \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
unsigned int val, orig; \
@@ -69,23 +69,23 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
-#define ATOMIC_OPS(op, c_op, asm_op) \
- ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_OP_RETURN(op, c_op, asm_op) \
- ATOMIC_FETCH_OP(op, c_op, asm_op)
+#define ATOMIC_OPS(op, asm_op) \
+ ATOMIC_OP(op, asm_op) \
+ ATOMIC_OP_RETURN(op, asm_op) \
+ ATOMIC_FETCH_OP(op, asm_op)
-ATOMIC_OPS(add, +=, add)
-ATOMIC_OPS(sub, -=, sub)
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
#undef ATOMIC_OPS
-#define ATOMIC_OPS(op, c_op, asm_op) \
- ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_FETCH_OP(op, c_op, asm_op)
+#define ATOMIC_OPS(op, asm_op) \
+ ATOMIC_OP(op, asm_op) \
+ ATOMIC_FETCH_OP(op, asm_op)
-ATOMIC_OPS(and, &=, and)
-ATOMIC_OPS(andnot, &= ~, bic)
-ATOMIC_OPS(or, |=, or)
-ATOMIC_OPS(xor, ^=, xor)
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(andnot, bic)
+ATOMIC_OPS(or, or)
+ATOMIC_OPS(xor, xor)
#define arch_atomic_andnot arch_atomic_andnot
diff --git a/arch/arc/include/asm/pgtable-levels.h b/arch/arc/include/asm/pgtable-levels.h
index 7848348719b2..64ca25d199be 100644
--- a/arch/arc/include/asm/pgtable-levels.h
+++ b/arch/arc/include/asm/pgtable-levels.h
@@ -98,9 +98,6 @@
/*
* 1st level paging: pgd
*/
-#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
-#define pgd_offset(mm, addr) (((mm)->pgd) + pgd_index(addr))
-#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
#define pgd_ERROR(e) \
pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c
index 03f8b1be0c3a..1e1db51b6941 100644
--- a/arch/arc/kernel/disasm.c
+++ b/arch/arc/kernel/disasm.c
@@ -366,7 +366,7 @@ void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
case op_SP: /* LD_S|LDB_S b,[sp,u7], ST_S|STB_S b,[sp,u7] */
/* note: we are ignoring possibility of:
* ADD_S, SUB_S, PUSH_S, POP_S as these should not
- * cause unaliged exception anyway */
+ * cause unaligned exception anyway */
state->write = BITS(state->words[0], 6, 6);
state->zz = BITS(state->words[0], 5, 5);
if (state->zz)
@@ -503,7 +503,6 @@ int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
{
struct disasm_state instr;
- memset(&instr, 0, sizeof(struct disasm_state));
disasm_instr(pc, &instr, 0, regs, cregs);
*next_pc = pc + instr.instr_len;
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index dd77a0c8f740..66ba549b520f 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -196,6 +196,7 @@ tracesys_exit:
st r0, [sp, PT_r0] ; sys call return value in pt_regs
;POST Sys Call Ptrace Hook
+ mov r0, sp ; pt_regs needed
bl @syscall_trace_exit
b ret_from_exception ; NOT ret_from_system_call as it saves r0 which
; we'd done before calling post hook above
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index f748483628f2..3c1590c27fae 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -319,7 +319,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
regs->ret = (unsigned long)ksig->ka.sa.sa_handler;
/*
- * handler returns using sigreturn stub provided already by userpsace
+ * handler returns using sigreturn stub provided already by userspace
* If not, nuke the process right away
*/
if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 78e6d069b1c1..d947473f1e6d 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
struct plat_smp_ops __weak plat_smp_ops;
-/* XXX: per cpu ? Only needed once in early seconday boot */
+/* XXX: per cpu ? Only needed once in early secondary boot */
struct task_struct *secondary_idle_tsk;
/* Called from start_kernel */
@@ -274,7 +274,7 @@ static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
* and read back old value
*/
do {
- new = old = READ_ONCE(*ipi_data_ptr);
+ new = old = *ipi_data_ptr;
new |= 1U << msg;
} while (cmpxchg(ipi_data_ptr, old, new) != old);
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index d63ebd81f1c6..99a9b92ed98d 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -237,7 +237,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
if (state.fault)
goto fault;
- /* clear any remanants of delay slot */
+ /* clear any remnants of delay slot */
if (delay_mode(regs)) {
regs->ret = regs->bta & ~1U;
regs->status32 &= ~STATUS_DE_MASK;
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 8aa1231865d1..5446967ea98d 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -401,7 +401,7 @@ static inline void __before_dc_op(const int op)
{
if (op == OP_FLUSH_N_INV) {
/* Dcache provides 2 cmd: FLUSH or INV
- * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
+ * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
* flush-n-inv is achieved by INV cmd but with IM=1
* So toggle INV sub-mode depending on op request and default
*/
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index c9629cb5ccd1..7da42a5b959c 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -263,6 +263,8 @@
compatible = "ti,am3359-tscadc";
reg = <0x0 0x1000>;
interrupts = <16>;
+ clocks = <&adc_tsc_fck>;
+ clock-names = "fck";
status = "disabled";
dmas = <&edma 53 0>, <&edma 57 0>;
dma-names = "fifo0", "fifo1";
diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
index 0d2fac98ce7d..c8b80f156ec9 100644
--- a/arch/arm/boot/dts/am3517-evm.dts
+++ b/arch/arm/boot/dts/am3517-evm.dts
@@ -161,6 +161,8 @@
/* HS USB Host PHY on PORT 1 */
hsusb1_phy: hsusb1_phy {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hsusb1_rst_pins>;
compatible = "usb-nop-xceiv";
reset-gpios = <&gpio2 25 GPIO_ACTIVE_LOW>; /* gpio_57 */
#phy-cells = <0>;
@@ -168,7 +170,9 @@
};
&davinci_emac {
- status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&ethernet_pins>;
+ status = "okay";
};
&davinci_mdio {
@@ -193,6 +197,8 @@
};
&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
clock-frequency = <400000>;
/* User DIP switches [1:8] / User LEDs [1:2] */
tca6416: gpio@21 {
@@ -205,6 +211,8 @@
};
&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_pins>;
clock-frequency = <400000>;
};
@@ -223,6 +231,8 @@
};
&usbhshost {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hsusb1_pins>;
port1-mode = "ehci-phy";
};
@@ -231,8 +241,35 @@
};
&omap3_pmx_core {
- pinctrl-names = "default";
- pinctrl-0 = <&hsusb1_rst_pins>;
+
+ ethernet_pins: pinmux_ethernet_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21fe, PIN_INPUT | MUX_MODE0) /* rmii_mdio_data */
+ OMAP3_CORE1_IOPAD(0x2200, MUX_MODE0) /* rmii_mdio_clk */
+ OMAP3_CORE1_IOPAD(0x2202, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_rxd0 */
+ OMAP3_CORE1_IOPAD(0x2204, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_rxd1 */
+ OMAP3_CORE1_IOPAD(0x2206, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_crs_dv */
+ OMAP3_CORE1_IOPAD(0x2208, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* rmii_rxer */
+ OMAP3_CORE1_IOPAD(0x220a, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* rmii_txd0 */
+ OMAP3_CORE1_IOPAD(0x220c, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* rmii_txd1 */
+ OMAP3_CORE1_IOPAD(0x220e, PIN_OUTPUT_PULLDOWN |MUX_MODE0) /* rmii_txen */
+ OMAP3_CORE1_IOPAD(0x2210, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_50mhz_clk */
+ >;
+ };
+
+ i2c2_pins: pinmux_i2c2_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c2_scl */
+ OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c2_sda */
+ >;
+ };
+
+ i2c3_pins: pinmux_i2c3_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c3_scl */
+ OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c3_sda */
+ >;
+ };
leds_pins: pinmux_leds_pins {
pinctrl-single,pins = <
@@ -300,8 +337,6 @@
};
&omap3_pmx_core2 {
- pinctrl-names = "default";
- pinctrl-0 = <&hsusb1_pins>;
hsusb1_pins: pinmux_hsusb1_pins {
pinctrl-single,pins = <
diff --git a/arch/arm/boot/dts/am3517-som.dtsi b/arch/arm/boot/dts/am3517-som.dtsi
index 8b669e2eafec..f7b680f6c48a 100644
--- a/arch/arm/boot/dts/am3517-som.dtsi
+++ b/arch/arm/boot/dts/am3517-som.dtsi
@@ -69,6 +69,8 @@
};
&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
clock-frequency = <400000>;
s35390a: s35390a@30 {
@@ -179,6 +181,13 @@
&omap3_pmx_core {
+ i2c1_pins: pinmux_i2c1_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c1_scl */
+ OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c1_sda */
+ >;
+ };
+
wl12xx_buffer_pins: pinmux_wl12xx_buffer_pins {
pinctrl-single,pins = <
OMAP3_CORE1_IOPAD(0x2156, PIN_OUTPUT | MUX_MODE4) /* mmc1_dat7.gpio_129 */
diff --git a/arch/arm/boot/dts/at91-dvk_su60_somc.dtsi b/arch/arm/boot/dts/at91-dvk_su60_somc.dtsi
index c1c8650dafce..3542ad8a243e 100644
--- a/arch/arm/boot/dts/at91-dvk_su60_somc.dtsi
+++ b/arch/arm/boot/dts/at91-dvk_su60_somc.dtsi
@@ -44,7 +44,7 @@
status = "okay";
/* spi0.0: 4M Flash Macronix MX25R4035FM1IL0 */
- spi-flash@0 {
+ flash@0 {
compatible = "mxicy,mx25u4035", "jedec,spi-nor";
spi-max-frequency = <33000000>;
reg = <0>;
diff --git a/arch/arm/boot/dts/at91-q5xr5.dts b/arch/arm/boot/dts/at91-q5xr5.dts
index 47a00062f01f..9cf60b6f695c 100644
--- a/arch/arm/boot/dts/at91-q5xr5.dts
+++ b/arch/arm/boot/dts/at91-q5xr5.dts
@@ -125,7 +125,7 @@
cs-gpios = <&pioA 3 GPIO_ACTIVE_HIGH>, <&pioC 11 GPIO_ACTIVE_LOW>, <0>, <0>;
status = "okay";
- m25p80@0 {
+ flash@0 {
compatible = "jedec,spi-nor";
spi-max-frequency = <20000000>;
reg = <0>;
diff --git a/arch/arm/boot/dts/at91-sam9_l9260.dts b/arch/arm/boot/dts/at91-sam9_l9260.dts
index 1e2a28c2f365..2fb51b9aca2a 100644
--- a/arch/arm/boot/dts/at91-sam9_l9260.dts
+++ b/arch/arm/boot/dts/at91-sam9_l9260.dts
@@ -101,7 +101,7 @@
nand0: nand@40000000 {
nand-bus-width = <8>;
nand-ecc-mode = "soft";
- nand-on-flash-bbt = <1>;
+ nand-on-flash-bbt;
status = "okay";
};
diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
index 21c86171e462..ba621783acdb 100644
--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
@@ -214,7 +214,7 @@
pinctrl-0 = <&pinctrl_qspi1_default>;
status = "disabled";
- qspi1_flash: spi_flash@0 {
+ qspi1_flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "jedec,spi-nor";
diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
index c145c4e5ef58..5e8755f22784 100644
--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
@@ -191,7 +191,7 @@
&qspi1 {
status = "okay";
- qspi1_flash: spi_flash@0 {
+ qspi1_flash: flash@0 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index 9bf2ec0ba3e2..cdfe891f9a9e 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -137,7 +137,7 @@
pinctrl-0 = <&pinctrl_spi0_default>;
status = "okay";
- m25p80@0 {
+ flash@0 {
compatible = "atmel,at25df321a";
reg = <0>;
spi-max-frequency = <50000000>;
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index d72c042f2850..a49c2966b41e 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -57,8 +57,8 @@
};
spi0: spi@f0004000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_spi0_cs>;
+ pinctrl-names = "default", "cs";
+ pinctrl-1 = <&pinctrl_spi0_cs>;
cs-gpios = <&pioD 13 0>, <0>, <0>, <&pioD 16 0>;
status = "okay";
};
@@ -171,8 +171,8 @@
};
spi1: spi@f8008000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_spi1_cs>;
+ pinctrl-names = "default", "cs";
+ pinctrl-1 = <&pinctrl_spi1_cs>;
cs-gpios = <&pioC 25 0>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi b/arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi
index 710cb72bda5a..fd1086f52b40 100644
--- a/arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi
@@ -49,7 +49,7 @@
cs-gpios = <&pioC 3 0>, <0>, <0>, <0>;
status = "okay";
- m25p80@0 {
+ flash@0 {
compatible = "atmel,at25df321a";
spi-max-frequency = <50000000>;
reg = <0>;
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
index d241c24f0d83..e519d2747936 100644
--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
@@ -81,8 +81,8 @@
};
spi1: spi@fc018000 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_spi0_cs>;
+ pinctrl-names = "default", "cs";
+ pinctrl-1 = <&pinctrl_spi1_cs>;
cs-gpios = <&pioB 21 0>;
status = "okay";
};
@@ -140,7 +140,7 @@
atmel,pins =
<AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
};
- pinctrl_spi0_cs: spi0_cs_default {
+ pinctrl_spi1_cs: spi1_cs_default {
atmel,pins =
<AT91_PIOB 21 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
};
diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
index fe432b6b7e95..7017f626f362 100644
--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
@@ -65,7 +65,7 @@
spi0: spi@f8010000 {
cs-gpios = <&pioC 3 0>, <0>, <0>, <0>;
status = "okay";
- m25p80@0 {
+ flash@0 {
compatible = "atmel,at25df321a";
spi-max-frequency = <50000000>;
reg = <0>;
diff --git a/arch/arm/boot/dts/at91-sama7g5ek.dts b/arch/arm/boot/dts/at91-sama7g5ek.dts
index 08685a10eda1..d83f76a6cd6a 100644
--- a/arch/arm/boot/dts/at91-sama7g5ek.dts
+++ b/arch/arm/boot/dts/at91-sama7g5ek.dts
@@ -495,7 +495,7 @@
pinctrl_flx3_default: flx3_default {
pinmux = <PIN_PD16__FLEXCOM3_IO0>,
<PIN_PD17__FLEXCOM3_IO1>;
- bias-disable;
+ bias-pull-up;
};
pinctrl_flx4_default: flx4_default {
@@ -655,7 +655,7 @@
<PIN_PB21__QSPI0_INT>;
bias-disable;
slew-rate = <0>;
- atmel,drive-strength = <ATMEL_PIO_DRVSTR_HI>;
+ atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
};
pinctrl_sdmmc0_default: sdmmc0_default {
diff --git a/arch/arm/boot/dts/at91-vinco.dts b/arch/arm/boot/dts/at91-vinco.dts
index a51a3372afa1..ebeaa6ab500e 100644
--- a/arch/arm/boot/dts/at91-vinco.dts
+++ b/arch/arm/boot/dts/at91-vinco.dts
@@ -59,7 +59,7 @@
spi0: spi@f8010000 {
cs-gpios = <&pioC 3 0>, <0>, <0>, <0>;
status = "okay";
- m25p80@0 {
+ flash@0 {
compatible = "n25q32b", "jedec,spi-nor";
spi-max-frequency = <50000000>;
reg = <0>;
diff --git a/arch/arm/boot/dts/at91rm9200ek.dts b/arch/arm/boot/dts/at91rm9200ek.dts
index e1ef4e44e663..4624a6f076f8 100644
--- a/arch/arm/boot/dts/at91rm9200ek.dts
+++ b/arch/arm/boot/dts/at91rm9200ek.dts
@@ -73,7 +73,7 @@
spi0: spi@fffe0000 {
status = "okay";
cs-gpios = <&pioA 3 0>, <0>, <0>, <0>;
- mtd_dataflash@0 {
+ flash@0 {
compatible = "atmel,at45", "atmel,dataflash";
spi-max-frequency = <15000000>;
reg = <0>;
@@ -94,7 +94,7 @@
status = "okay";
};
- nor_flash@10000000 {
+ flash@10000000 {
compatible = "cfi-flash";
reg = <0x10000000 0x800000>;
linux,mtd-name = "physmap-flash.0";
diff --git a/arch/arm/boot/dts/at91sam9260ek.dts b/arch/arm/boot/dts/at91sam9260ek.dts
index ce96345d28a3..6381088ba24f 100644
--- a/arch/arm/boot/dts/at91sam9260ek.dts
+++ b/arch/arm/boot/dts/at91sam9260ek.dts
@@ -92,7 +92,7 @@
spi0: spi@fffc8000 {
cs-gpios = <0>, <&pioC 11 0>, <0>, <0>;
- mtd_dataflash@1 {
+ flash@1 {
compatible = "atmel,at45", "atmel,dataflash";
spi-max-frequency = <50000000>;
reg = <1>;
diff --git a/arch/arm/boot/dts/at91sam9261ek.dts b/arch/arm/boot/dts/at91sam9261ek.dts
index beed819609e8..8f11c0b7d76d 100644
--- a/arch/arm/boot/dts/at91sam9261ek.dts
+++ b/arch/arm/boot/dts/at91sam9261ek.dts
@@ -145,7 +145,7 @@
cs-gpios = <&pioA 3 0>, <0>, <&pioA 28 0>, <0>;
status = "okay";
- mtd_dataflash@0 {
+ flash@0 {
compatible = "atmel,at45", "atmel,dataflash";
reg = <0>;
spi-max-frequency = <15000000>;
diff --git a/arch/arm/boot/dts/at91sam9263ek.dts b/arch/arm/boot/dts/at91sam9263ek.dts
index 71f60576761a..42e734020235 100644
--- a/arch/arm/boot/dts/at91sam9263ek.dts
+++ b/arch/arm/boot/dts/at91sam9263ek.dts
@@ -95,7 +95,7 @@
spi0: spi@fffa4000 {
status = "okay";
cs-gpios = <&pioA 5 0>, <0>, <0>, <0>;
- mtd_dataflash@0 {
+ flash@0 {
compatible = "atmel,at45", "atmel,dataflash";
spi-max-frequency = <50000000>;
reg = <0>;
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index 87bb39060e8b..85c17dd1c8d5 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -110,7 +110,7 @@
spi0: spi@fffc8000 {
cs-gpios = <0>, <&pioC 11 0>, <0>, <0>;
- mtd_dataflash@1 {
+ flash@1 {
compatible = "atmel,at45", "atmel,dataflash";
spi-max-frequency = <50000000>;
reg = <1>;
@@ -214,11 +214,23 @@
24c512@50 {
compatible = "atmel,24c512";
reg = <0x50>;
+ vcc-supply = <&reg_3v3>;
};
wm8731: wm8731@1b {
compatible = "wm8731";
reg = <0x1b>;
+
+ /* PCK0 at 12MHz */
+ clocks = <&pmc PMC_TYPE_SYSTEM 8>;
+ clock-names = "mclk";
+ assigned-clocks = <&pmc PMC_TYPE_SYSTEM 8>;
+ assigned-clock-rates = <12000000>;
+
+ HPVDD-supply = <&vcc_dac>;
+ AVDD-supply = <&vcc_dac>;
+ DCVDD-supply = <&reg_3v3>;
+ DBVDD-supply = <&reg_3v3>;
};
};
@@ -254,4 +266,35 @@
atmel,ssc-controller = <&ssc0>;
atmel,audio-codec = <&wm8731>;
};
+
+ reg_5v: fixedregulator0 {
+ compatible = "regulator-fixed";
+ regulator-name = "5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ };
+
+ reg_3v3: fixedregulator1 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ vin-supply = <&reg_5v>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_1v: fixedregulator2 {
+ compatible = "regulator-fixed";
+ regulator-name = "1V";
+ vin-supply = <&reg_5v>;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ vcc_dac: fixedregulator3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_DAC";
+ vin-supply = <&reg_3v3>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
};
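The assigned-clocks/assigned-clock-rates pair added to the wm8731 node is a declarative way to program a clock: when the consumer device probes, the clock framework applies the assignment (here requesting 12 MHz on PCK0) before the driver runs, so the rate can be set without driver code. Generic shape, with the clock specifier taken from the hunk above:

    codec@1b {
        clocks = <&pmc PMC_TYPE_SYSTEM 8>;          /* PCK0 */
        clock-names = "mclk";
        assigned-clocks = <&pmc PMC_TYPE_SYSTEM 8>;
        assigned-clock-rates = <12000000>;          /* applied at probe time */
    };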
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts
index b6256a20fbc7..e5db198a87a8 100644
--- a/arch/arm/boot/dts/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts
@@ -167,7 +167,7 @@
spi0: spi@fffa4000{
status = "okay";
cs-gpios = <&pioB 3 0>, <0>, <0>, <0>;
- mtd_dataflash@0 {
+ flash@0 {
compatible = "atmel,at45", "atmel,dataflash";
spi-max-frequency = <13000000>;
reg = <0>;
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index 2bc4e6e0a923..c905d7bfc771 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -119,7 +119,7 @@
spi0: spi@f0000000 {
status = "okay";
cs-gpios = <&pioA 14 0>, <0>, <0>, <0>;
- m25p80@0 {
+ flash@0 {
compatible = "atmel,at25df321a";
spi-max-frequency = <50000000>;
reg = <0>;
diff --git a/arch/arm/boot/dts/at91sam9rlek.dts b/arch/arm/boot/dts/at91sam9rlek.dts
index 62981b39c815..d74b8d9d84aa 100644
--- a/arch/arm/boot/dts/at91sam9rlek.dts
+++ b/arch/arm/boot/dts/at91sam9rlek.dts
@@ -180,7 +180,7 @@
spi0: spi@fffcc000 {
status = "okay";
cs-gpios = <&pioA 28 0>, <0>, <0>, <0>;
- mtd_dataflash@0 {
+ flash@0 {
compatible = "atmel,at45", "atmel,dataflash";
spi-max-frequency = <15000000>;
reg = <0>;
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index 6d1264de6060..5f4eaa618ab4 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -125,7 +125,7 @@
cs-gpios = <&pioA 14 0>, <0>, <0>, <0>;
status = "disabled"; /* conflicts with mmc1 */
- m25p80@0 {
+ flash@0 {
compatible = "atmel,at25df321a";
spi-max-frequency = <50000000>;
reg = <0>;
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index 87c517d65f62..e9aecac4f5b5 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -278,7 +278,7 @@
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&spi1_pins &spi1_cs0_pin>;
- flash: m25p80@0 {
+ flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "jedec,spi-nor";
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts
index 5126e2d72ed7..778796c10af8 100644
--- a/arch/arm/boot/dts/dm8168-evm.dts
+++ b/arch/arm/boot/dts/dm8168-evm.dts
@@ -177,7 +177,7 @@
pinctrl-names = "default";
pinctrl-0 = <&mcspi1_pins>;
- m25p80@0 {
+ flash@0 {
compatible = "w25x32";
spi-max-frequency = <48000000>;
reg = <0>;
diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
index 0a11bacffc1f..5733e3a4ea8e 100644
--- a/arch/arm/boot/dts/dra7-l4.dtsi
+++ b/arch/arm/boot/dts/dra7-l4.dtsi
@@ -4188,11 +4188,11 @@
reg = <0x1d0010 0x4>;
reg-names = "sysc";
ti,sysc-midle = <SYSC_IDLE_FORCE>,
- <SYSC_IDLE_NO>,
- <SYSC_IDLE_SMART>;
+ <SYSC_IDLE_NO>;
ti,sysc-sidle = <SYSC_IDLE_FORCE>,
<SYSC_IDLE_NO>,
<SYSC_IDLE_SMART>;
+ power-domains = <&prm_vpe>;
clocks = <&vpe_clkctrl DRA7_VPE_VPE_CLKCTRL 0>;
clock-names = "fck";
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/imx28-ts4600.dts b/arch/arm/boot/dts/imx28-ts4600.dts
index 097ec35c62d8..0d58da1c0cc5 100644
--- a/arch/arm/boot/dts/imx28-ts4600.dts
+++ b/arch/arm/boot/dts/imx28-ts4600.dts
@@ -26,7 +26,7 @@
pinctrl-0 = <&mmc0_4bit_pins_a
&mmc0_sck_cfg
&en_sd_pwr>;
- broken-cd = <1>;
+ broken-cd;
bus-width = <4>;
vmmc-supply = <&reg_vddio_sd0>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
index ed2739e39085..bd763bae596b 100644
--- a/arch/arm/boot/dts/imx6qdl-apalis.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-apalis.dtsi
@@ -286,6 +286,8 @@
codec: sgtl5000@a {
compatible = "fsl,sgtl5000";
reg = <0x0a>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sgtl5000>;
clocks = <&clks IMX6QDL_CLK_CKO>;
VDDA-supply = <&reg_module_3v3_audio>;
VDDIO-supply = <&reg_module_3v3>;
@@ -517,8 +519,6 @@
MX6QDL_PAD_DISP0_DAT21__AUD4_TXD 0x130b0
MX6QDL_PAD_DISP0_DAT22__AUD4_TXFS 0x130b0
MX6QDL_PAD_DISP0_DAT23__AUD4_RXD 0x130b0
- /* SGTL5000 sys_mclk */
- MX6QDL_PAD_GPIO_5__CCM_CLKO1 0x130b0
>;
};
@@ -811,6 +811,12 @@
>;
};
+ pinctrl_sgtl5000: sgtl5000grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_5__CCM_CLKO1 0x130b0
+ >;
+ };
+
pinctrl_spdif: spdifgrp {
fsl,pins = <
MX6QDL_PAD_GPIO_16__SPDIF_IN 0x1b0b0
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
index 563bf9d44fe0..0b90c3f59f89 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
@@ -154,112 +154,112 @@
regulators {
bcore1 {
regulator-name = "bcore1";
- regulator-always-on = <1>;
+ regulator-always-on;
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <3300000>;
};
bcore2 {
regulator-name = "bcore2";
- regulator-always-on = <1>;
+ regulator-always-on;
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <3300000>;
};
bpro {
regulator-name = "bpro";
- regulator-always-on = <1>;
+ regulator-always-on;
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <3300000>;
};
bperi {
regulator-name = "bperi";
- regulator-always-on = <1>;
+ regulator-always-on;
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <3300000>;
};
bmem {
regulator-name = "bmem";
- regulator-always-on = <1>;
+ regulator-always-on;
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <3300000>;
};
ldo2 {
regulator-name = "ldo2";
- regulator-always-on = <1>;
+ regulator-always-on;
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <1800000>;
};
ldo3 {
regulator-name = "ldo3";
- regulator-always-on = <1>;
+ regulator-always-on;
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <3300000>;
};
ldo4 {
regulator-name = "ldo4";
- regulator-always-on = <1>;
+ regulator-always-on;
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <3300000>;
};
ldo5 {
regulator-name = "ldo5";
- regulator-always-on = <1>;
+ regulator-always-on;
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <3300000>;
};
ldo6 {
regulator-name = "ldo6";
- regulator-always-on = <1>;
+ regulator-always-on;
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <3300000>;
};
ldo7 {
regulator-name = "ldo7";
- regulator-always-on = <1>;
+ regulator-always-on;
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <3300000>;
};
ldo8 {
regulator-name = "ldo8";
- regulator-always-on = <1>;
+ regulator-always-on;
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <3300000>;
};
ldo9 {
regulator-name = "ldo9";
- regulator-always-on = <1>;
+ regulator-always-on;
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <3300000>;
};
ldo10 {
regulator-name = "ldo10";
- regulator-always-on = <1>;
+ regulator-always-on;
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <3300000>;
};
ldo11 {
regulator-name = "ldo11";
- regulator-always-on = <1>;
+ regulator-always-on;
regulator-min-microvolt = <300000>;
regulator-max-microvolt = <3300000>;
};
bio {
regulator-name = "bio";
- regulator-always-on = <1>;
+ regulator-always-on;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
diff --git a/arch/arm/boot/dts/imx6ul-phytec-segin-peb-av-02.dtsi b/arch/arm/boot/dts/imx6ul-phytec-segin-peb-av-02.dtsi
index 7cda6944501d..205e4d462702 100644
--- a/arch/arm/boot/dts/imx6ul-phytec-segin-peb-av-02.dtsi
+++ b/arch/arm/boot/dts/imx6ul-phytec-segin-peb-av-02.dtsi
@@ -72,8 +72,8 @@
st,settling = <2>;
st,fraction-z = <7>;
st,i-drive = <1>;
- touchscreen-inverted-x = <1>;
- touchscreen-inverted-y = <1>;
+ touchscreen-inverted-x;
+ touchscreen-inverted-y;
};
};
};
diff --git a/arch/arm/boot/dts/imx6ull-colibri.dtsi b/arch/arm/boot/dts/imx6ull-colibri.dtsi
index 7f35a06dff95..951a2a6c5a65 100644
--- a/arch/arm/boot/dts/imx6ull-colibri.dtsi
+++ b/arch/arm/boot/dts/imx6ull-colibri.dtsi
@@ -37,7 +37,7 @@
reg_sd1_vmmc: regulator-sd1-vmmc {
compatible = "regulator-gpio";
- gpio = <&gpio5 9 GPIO_ACTIVE_HIGH>;
+ gpios = <&gpio5 9 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_snvs_reg_sd>;
regulator-always-on;
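The gpio-regulator binding spells its state-selection property gpios (plural); the singular gpio used previously is not part of that binding, so the property risked being ignored by the driver and flagged by the schema. A sketch of the binding (the states values below are illustrative, not taken from this board):

    reg_sd1_vmmc: regulator-sd1-vmmc {
        compatible = "regulator-gpio";
        gpios = <&gpio5 9 GPIO_ACTIVE_HIGH>;    /* selects the state */
        states = <1800000 0x1>, <3300000 0x0>;  /* voltage / GPIO-value pairs */
    };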
diff --git a/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts
index 2a0a98fe67f0..3240c67e0c39 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts
@@ -11,3 +11,18 @@
model = "LogicPD Zoom OMAP35xx SOM-LV Development Kit";
compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3430", "ti,omap3";
};
+
+&omap3_pmx_core2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hsusb2_2_pins>;
+ hsusb2_2_pins: pinmux_hsusb2_2_pins {
+ pinctrl-single,pins = <
+ OMAP3430_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */
+ OMAP3430_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */
+ OMAP3430_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */
+ OMAP3430_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */
+ OMAP3430_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */
+ OMAP3430_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
index a604d92221a4..c757f0d7781c 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
@@ -11,3 +11,18 @@
model = "LogicPD Zoom DM3730 SOM-LV Development Kit";
compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3630", "ti,omap3";
};
+
+&omap3_pmx_core2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hsusb2_2_pins>;
+ hsusb2_2_pins: pinmux_hsusb2_2_pins {
+ pinctrl-single,pins = <
+ OMAP3630_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */
+ OMAP3630_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */
+ OMAP3630_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */
+ OMAP3630_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */
+ OMAP3630_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */
+ OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index b56524cc7fe2..55b619c99e24 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -265,21 +265,6 @@
};
};
-&omap3_pmx_core2 {
- pinctrl-names = "default";
- pinctrl-0 = <&hsusb2_2_pins>;
- hsusb2_2_pins: pinmux_hsusb2_2_pins {
- pinctrl-single,pins = <
- OMAP3630_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */
- OMAP3630_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */
- OMAP3630_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */
- OMAP3630_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */
- OMAP3630_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */
- OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */
- >;
- };
-};
-
&uart2 {
interrupts-extended = <&intc 73 &omap3_pmx_core OMAP3_UART2_RX>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
index b4664ab00256..d3da8b1b473b 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
@@ -118,8 +118,8 @@
gpmc,device-width = <2>;
gpmc,wait-pin = <0>;
gpmc,burst-length = <4>;
- gpmc,cycle2cycle-samecsen = <1>;
- gpmc,cycle2cycle-diffcsen = <1>;
+ gpmc,cycle2cycle-samecsen;
+ gpmc,cycle2cycle-diffcsen;
gpmc,cs-on-ns = <0>;
gpmc,cs-rd-off-ns = <45>;
gpmc,cs-wr-off-ns = <45>;
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index 7e3d8147e2c1..0365f06165e9 100644
--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -31,6 +31,8 @@
aliases {
display0 = &lcd;
display1 = &tv0;
+ /delete-property/ mmc2;
+ /delete-property/ mmc3;
};
ldo_3v3: fixedregulator {
diff --git a/arch/arm/boot/dts/qcom-apq8064-pins.dtsi b/arch/arm/boot/dts/qcom-apq8064-pins.dtsi
index cbe42c4153a0..b4d286a6fab1 100644
--- a/arch/arm/boot/dts/qcom-apq8064-pins.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064-pins.dtsi
@@ -76,7 +76,7 @@
pinconf {
pins = "gpio20", "gpio21";
drive-strength = <2>;
- bias-disable = <0>;
+ bias-disable;
};
};
@@ -116,7 +116,7 @@
pinconf {
pins = "gpio24", "gpio25";
drive-strength = <2>;
- bias-disable = <0>;
+ bias-disable;
};
};
@@ -141,7 +141,7 @@
pinconf {
pins = "gpio8", "gpio9";
drive-strength = <2>;
- bias-disable = <0>;
+ bias-disable;
};
};
@@ -166,7 +166,7 @@
pinconf {
pins = "gpio12", "gpio13";
drive-strength = <2>;
- bias-disable = <0>;
+ bias-disable;
};
};
@@ -229,7 +229,7 @@
pinconf {
pins = "gpio16", "gpio17";
drive-strength = <2>;
- bias-disable = <0>;
+ bias-disable;
};
};
@@ -282,7 +282,7 @@
pinconf {
pins = "gpio84", "gpio85";
drive-strength = <2>;
- bias-disable = <0>;
+ bias-disable;
};
};
diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi
index 996f4458d9fc..8cb04aa8ed2f 100644
--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
@@ -972,7 +972,7 @@
snps,axi-config = <&stmmac_axi_setup>;
snps,pbl = <32>;
- snps,aal = <1>;
+ snps,aal;
qcom,nss-common = <&nss_common>;
qcom,qsgmii-csr = <&qsgmii_csr>;
@@ -996,7 +996,7 @@
snps,axi-config = <&stmmac_axi_setup>;
snps,pbl = <32>;
- snps,aal = <1>;
+ snps,aal;
qcom,nss-common = <&nss_common>;
qcom,qsgmii-csr = <&qsgmii_csr>;
@@ -1020,7 +1020,7 @@
snps,axi-config = <&stmmac_axi_setup>;
snps,pbl = <32>;
- snps,aal = <1>;
+ snps,aal;
qcom,nss-common = <&nss_common>;
qcom,qsgmii-csr = <&qsgmii_csr>;
@@ -1044,7 +1044,7 @@
snps,axi-config = <&stmmac_axi_setup>;
snps,pbl = <32>;
- snps,aal = <1>;
+ snps,aal;
qcom,nss-common = <&nss_common>;
qcom,qsgmii-csr = <&qsgmii_csr>;
diff --git a/arch/arm/boot/dts/sama5d3xmb.dtsi b/arch/arm/boot/dts/sama5d3xmb.dtsi
index a499de8a7a64..3652c9e24124 100644
--- a/arch/arm/boot/dts/sama5d3xmb.dtsi
+++ b/arch/arm/boot/dts/sama5d3xmb.dtsi
@@ -26,7 +26,7 @@
spi0: spi@f0004000 {
dmas = <0>, <0>; /* Do not use DMA for spi0 */
- m25p80@0 {
+ flash@0 {
compatible = "atmel,at25df321a";
spi-max-frequency = <50000000>;
reg = <0>;
diff --git a/arch/arm/boot/dts/sama5d3xmb_cmp.dtsi b/arch/arm/boot/dts/sama5d3xmb_cmp.dtsi
index fa9e5e2a745d..5d9e97fecf83 100644
--- a/arch/arm/boot/dts/sama5d3xmb_cmp.dtsi
+++ b/arch/arm/boot/dts/sama5d3xmb_cmp.dtsi
@@ -25,7 +25,7 @@
spi0: spi@f0004000 {
dmas = <0>, <0>; /* Do not use DMA for spi0 */
- m25p80@0 {
+ flash@0 {
compatible = "atmel,at25df321a";
spi-max-frequency = <50000000>;
reg = <0>;
diff --git a/arch/arm/boot/dts/sama7g5.dtsi b/arch/arm/boot/dts/sama7g5.dtsi
index 4decd3a91a76..f691c8f08d04 100644
--- a/arch/arm/boot/dts/sama7g5.dtsi
+++ b/arch/arm/boot/dts/sama7g5.dtsi
@@ -601,9 +601,9 @@
#size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 39>;
atmel,fifo-size = <32>;
- dmas = <&dma0 AT91_XDMAC_DT_PERID(7)>,
- <&dma0 AT91_XDMAC_DT_PERID(8)>;
- dma-names = "rx", "tx";
+ dmas = <&dma0 AT91_XDMAC_DT_PERID(8)>,
+ <&dma0 AT91_XDMAC_DT_PERID(7)>;
+ dma-names = "tx", "rx";
status = "disabled";
};
};
@@ -786,9 +786,9 @@
#size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 46>;
atmel,fifo-size = <32>;
- dmas = <&dma0 AT91_XDMAC_DT_PERID(21)>,
- <&dma0 AT91_XDMAC_DT_PERID(22)>;
- dma-names = "rx", "tx";
+ dmas = <&dma0 AT91_XDMAC_DT_PERID(22)>,
+ <&dma0 AT91_XDMAC_DT_PERID(21)>;
+ dma-names = "tx", "rx";
status = "disabled";
};
};
@@ -810,9 +810,9 @@
#size-cells = <0>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 47>;
atmel,fifo-size = <32>;
- dmas = <&dma0 AT91_XDMAC_DT_PERID(23)>,
- <&dma0 AT91_XDMAC_DT_PERID(24)>;
- dma-names = "rx", "tx";
+ dmas = <&dma0 AT91_XDMAC_DT_PERID(24)>,
+ <&dma0 AT91_XDMAC_DT_PERID(23)>;
+ dma-names = "tx", "rx";
status = "disabled";
};
};
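dma-names binds positionally to dmas: the nth name labels the nth specifier, and drivers request channels by name. Note that these swaps keep the mapping itself intact (PERID 8 stays tx and PERID 7 stays rx, likewise for the later FLEXCOMs); only the listing order changes, to the tx-first convention used elsewhere in the file. Pattern:

    /* name[n] labels dmas[n]; the pairs must always move together */
    dmas = <&dma0 AT91_XDMAC_DT_PERID(8)>,  /* "tx" */
           <&dma0 AT91_XDMAC_DT_PERID(7)>;  /* "rx" */
    dma-names = "tx", "rx";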
diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
index 4cbadcb41084..ddd1cf4d0554 100644
--- a/arch/arm/boot/dts/spear1310-evb.dts
+++ b/arch/arm/boot/dts/spear1310-evb.dts
@@ -379,7 +379,7 @@
};
};
- m25p80@1 {
+ flash@1 {
compatible = "st,m25p80";
reg = <1>;
spi-max-frequency = <12000000>;
diff --git a/arch/arm/boot/dts/spear1340-evb.dts b/arch/arm/boot/dts/spear1340-evb.dts
index fd194ebeedc9..3a51a41eb5e4 100644
--- a/arch/arm/boot/dts/spear1340-evb.dts
+++ b/arch/arm/boot/dts/spear1340-evb.dts
@@ -439,7 +439,7 @@
cs-gpios = <&gpiopinctrl 80 0>, <&gpiopinctrl 24 0>,
<&gpiopinctrl 85 0>;
- m25p80@0 {
+ flash@0 {
compatible = "m25p80";
reg = <0>;
spi-max-frequency = <12000000>;
diff --git a/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi b/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
index 33ae5e0590df..ac53ee3c496b 100644
--- a/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
+++ b/arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
@@ -398,7 +398,7 @@
#size-cells = <0>;
status = "okay";
- flash0: is25lp016d@0 {
+ flash0: flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <133000000>;
diff --git a/arch/arm/boot/dts/stm32mp157c-ev1.dts b/arch/arm/boot/dts/stm32mp157c-ev1.dts
index e222d2d2cb44..d142dd30e16b 100644
--- a/arch/arm/boot/dts/stm32mp157c-ev1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-ev1.dts
@@ -262,7 +262,7 @@
#size-cells = <0>;
status = "okay";
- flash0: mx66l51235l@0 {
+ flash0: flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-rx-bus-width = <4>;
@@ -271,7 +271,7 @@
#size-cells = <1>;
};
- flash1: mx66l51235l@1 {
+ flash1: flash@1 {
compatible = "jedec,spi-nor";
reg = <1>;
spi-rx-bus-width = <4>;
diff --git a/arch/arm/boot/dts/usb_a9263.dts b/arch/arm/boot/dts/usb_a9263.dts
index 8a0cfbfd0c45..b6cb9cdf8197 100644
--- a/arch/arm/boot/dts/usb_a9263.dts
+++ b/arch/arm/boot/dts/usb_a9263.dts
@@ -60,7 +60,7 @@
spi0: spi@fffa4000 {
cs-gpios = <&pioB 15 GPIO_ACTIVE_HIGH>;
status = "okay";
- mtd_dataflash@0 {
+ flash@0 {
compatible = "atmel,at45", "atmel,dataflash";
reg = <0>;
spi-max-frequency = <15000000>;
diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig
index a7acfee11ffc..a80bc8a43091 100644
--- a/arch/arm/configs/gemini_defconfig
+++ b/arch/arm/configs/gemini_defconfig
@@ -49,11 +49,13 @@ CONFIG_ATA=y
CONFIG_PATA_FTIDE010=y
CONFIG_NETDEVICES=y
CONFIG_TUN=y
+CONFIG_NET_DSA_REALTEK=y
CONFIG_NET_DSA_REALTEK_SMI=y
+CONFIG_NET_DSA_REALTEK_RTL8366RB=y
CONFIG_GEMINI_ETHERNET=y
+CONFIG_MARVELL_PHY=y
CONFIG_MDIO_BITBANG=y
CONFIG_MDIO_GPIO=y
-CONFIG_MARVELL_PHY=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
@@ -66,6 +68,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_I2C_GPIO=y
CONFIG_SPI=y
CONFIG_SPI_GPIO=y
+CONFIG_SENSORS_DRIVETEMP=y
CONFIG_SENSORS_GPIO_FAN=y
CONFIG_SENSORS_LM75=y
CONFIG_THERMAL=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
deleted file mode 100644
index 015b7ef237de..000000000000
--- a/arch/arm/configs/imote2_defconfig
+++ /dev/null
@@ -1,365 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_EXPERT=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_PXA=y
-CONFIG_MACH_INTELMOTE2=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/mtdblock2 rootfstype=jffs2 console=ttyS2,115200 mem=32M"
-CONFIG_KEXEC=y
-CONFIG_FPE_NWFPE=y
-CONFIG_BINFMT_AOUT=m
-CONFIG_BINFMT_MISC=m
-CONFIG_PM=y
-CONFIG_APM_EMULATION=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_DIAG is not set
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_MIP6=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_IPV6_SUBTREES=y
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
-CONFIG_NF_CONNTRACK_AMANDA=m
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_H323=m
-CONFIG_NF_CONNTRACK_IRC=m
-CONFIG_NF_CONNTRACK_NETBIOS_NS=m
-CONFIG_NF_CONNTRACK_PPTP=m
-CONFIG_NF_CONNTRACK_SANE=m
-CONFIG_NF_CONNTRACK_SIP=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-CONFIG_NETFILTER_XT_TARGET_LED=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFLOG=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
-CONFIG_NETFILTER_XT_MATCH_DSCP=m
-CONFIG_NETFILTER_XT_MATCH_ESP=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_HELPER=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-CONFIG_NETFILTER_XT_MATCH_QUOTA=m
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_SCTP=m
-CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_TIME=m
-CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
-CONFIG_IP_NF_MATCH_AH=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_AH=m
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_MATCH_FRAG=m
-CONFIG_IP6_NF_MATCH_OPTS=m
-CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_MH=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_REJECT=m
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_RAW=m
-CONFIG_BRIDGE=m
-# CONFIG_BRIDGE_IGMP_SNOOPING is not set
-CONFIG_IEEE802154=y
-# CONFIG_WIRELESS is not set
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_FW_LOADER=m
-CONFIG_CONNECTOR=m
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_AFS_PARTS=y
-CONFIG_MTD_AR7_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_ADV_OPTIONS=y
-CONFIG_MTD_CFI_GEOMETRY=y
-# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
-# CONFIG_MTD_CFI_I2 is not set
-CONFIG_MTD_OTP=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PXA2XX=y
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-# CONFIG_WLAN is not set
-CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-# CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_GPIO=y
-CONFIG_KEYBOARD_PXA27x=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_UINPUT=y
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_PXA=y
-CONFIG_SERIAL_PXA_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=8
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_PXA=y
-CONFIG_SPI=y
-CONFIG_SPI_PXA2XX=y
-CONFIG_GPIO_SYSFS=y
-CONFIG_POWER_SUPPLY=y
-# CONFIG_HWMON is not set
-CONFIG_PMIC_DA903X=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_DEBUG=y
-CONFIG_REGULATOR_DA903X=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_VIDEO_DEV=y
-CONFIG_MEDIA_TUNER_CUSTOMISE=y
-# CONFIG_MEDIA_TUNER_SIMPLE is not set
-# CONFIG_MEDIA_TUNER_TDA8290 is not set
-# CONFIG_MEDIA_TUNER_TDA827X is not set
-# CONFIG_MEDIA_TUNER_TDA18271 is not set
-# CONFIG_MEDIA_TUNER_TDA9887 is not set
-# CONFIG_MEDIA_TUNER_TEA5761 is not set
-# CONFIG_MEDIA_TUNER_TEA5767 is not set
-# CONFIG_MEDIA_TUNER_MT20XX is not set
-# CONFIG_MEDIA_TUNER_MT2060 is not set
-# CONFIG_MEDIA_TUNER_MT2266 is not set
-# CONFIG_MEDIA_TUNER_MT2131 is not set
-# CONFIG_MEDIA_TUNER_QT1010 is not set
-# CONFIG_MEDIA_TUNER_XC2028 is not set
-# CONFIG_MEDIA_TUNER_XC5000 is not set
-# CONFIG_MEDIA_TUNER_MXL5005S is not set
-# CONFIG_MEDIA_TUNER_MXL5007T is not set
-# CONFIG_MEDIA_TUNER_MC44S803 is not set
-# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
-CONFIG_VIDEO_PXA27x=y
-# CONFIG_V4L_USB_DRIVERS is not set
-# CONFIG_RADIO_ADAPTERS is not set
-CONFIG_FB=y
-CONFIG_FB_PXA=y
-CONFIG_FB_PXA_OVERLAY=y
-CONFIG_FB_PXA_PARAMETERS=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_MINI_4x6=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-# CONFIG_SND_DRIVERS is not set
-# CONFIG_SND_ARM is not set
-# CONFIG_SND_SPI is not set
-# CONFIG_SND_USB is not set
-CONFIG_SND_SOC=y
-CONFIG_SND_PXA2XX_SOC=y
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_PXA27X=y
-CONFIG_USB_ETH=m
-# CONFIG_USB_ETH_RNDIS is not set
-CONFIG_MMC=y
-CONFIG_SDIO_UART=m
-CONFIG_MMC_PXA=y
-CONFIG_MMC_SPI=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_LP3944=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_BACKLIGHT=y
-CONFIG_LEDS_TRIGGER_GPIO=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_PXA=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=m
-CONFIG_AUTOFS4_FS=y
-CONFIG_FUSE_FS=m
-CONFIG_CUSE=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_WBUF_VERIFY=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_JFFS2_FS_XATTR=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_LZO=y
-CONFIG_JFFS2_RUBIN=y
-CONFIG_CRAMFS=m
-CONFIG_SQUASHFS=m
-CONFIG_ROMFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3_ACL=y
-CONFIG_SMB_FS=m
-CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
-CONFIG_CIFS_XATTR=y
-CONFIG_CIFS_POSIX=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_SCHED_DEBUG is not set
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_PROVE_LOCKING=y
-# CONFIG_FTRACE is not set
-CONFIG_DEBUG_USER=y
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_GHASH=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SEED=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC16=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 6e0c8c19b35c..d6a6811f0539 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -673,6 +673,7 @@ CONFIG_VIDEO_STI_DELTA=m
CONFIG_VIDEO_RENESAS_FDP1=m
CONFIG_VIDEO_RENESAS_JPU=m
CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_VIDEO_TEGRA_VDE=m
CONFIG_V4L_TEST_DRIVERS=y
CONFIG_VIDEO_VIVID=m
CONFIG_VIDEO_ADV7180=m
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 289d022acc4b..c209722399d7 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -286,7 +286,8 @@ CONFIG_SERIO_NVEC_PS2=y
CONFIG_NVEC_POWER=y
CONFIG_NVEC_PAZ00=y
CONFIG_STAGING_MEDIA=y
-CONFIG_TEGRA_VDE=y
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_TEGRA_VDE=y
CONFIG_CHROME_PLATFORMS=y
CONFIG_CROS_EC=y
CONFIG_CROS_EC_I2C=m
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 3b30913d7d8d..a352207a64d7 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -20,7 +20,6 @@ CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_CMA=y
CONFIG_NET=y
@@ -41,6 +40,8 @@ CONFIG_MAC80211_LEDS=y
CONFIG_CAIF=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_GNSS=y
+CONFIG_GNSS_SIRF_SERIAL=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_NETDEVICES=y
@@ -83,6 +84,8 @@ CONFIG_SPI_GPIO=y
CONFIG_SPI_PL022=y
CONFIG_GPIO_STMPE=y
CONFIG_GPIO_TC3589X=y
+CONFIG_BATTERY_SAMSUNG_SDI=y
+CONFIG_AB8500_BM=y
CONFIG_SENSORS_IIO_HWMON=y
CONFIG_SENSORS_NTC_THERMISTOR=y
CONFIG_THERMAL=y
@@ -98,10 +101,13 @@ CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_V4L2_FLASH_LED_CLASS=y
CONFIG_DRM=y
CONFIG_DRM_PANEL_NOVATEK_NT35510=y
+CONFIG_DRM_PANEL_NOVATEK_NT35560=y
+CONFIG_DRM_PANEL_SAMSUNG_DB7430=y
CONFIG_DRM_PANEL_SAMSUNG_S6D16D0=y
+CONFIG_DRM_PANEL_SAMSUNG_S6D27A1=y
CONFIG_DRM_PANEL_SAMSUNG_S6E63M0=y
CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI=y
-CONFIG_DRM_PANEL_SONY_ACX424AKP=y
+CONFIG_DRM_PANEL_WIDECHIPS_WS2401=y
CONFIG_DRM_LIMA=y
CONFIG_DRM_MCDE=y
CONFIG_FB=y
@@ -129,6 +135,7 @@ CONFIG_LEDS_LM3530=y
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_LP55XX_COMMON=y
CONFIG_LEDS_LP5521=y
+CONFIG_LEDS_REGULATOR=y
CONFIG_LEDS_RT8515=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
@@ -144,17 +151,22 @@ CONFIG_IIO_SW_TRIGGER=y
CONFIG_BMA180=y
CONFIG_BMC150_ACCEL=y
CONFIG_IIO_ST_ACCEL_3AXIS=y
+# CONFIG_IIO_ST_ACCEL_SPI_3AXIS is not set
CONFIG_IIO_RESCALE=y
CONFIG_MPU3050_I2C=y
CONFIG_IIO_ST_GYRO_3AXIS=y
+# CONFIG_IIO_ST_GYRO_SPI_3AXIS is not set
CONFIG_INV_MPU6050_I2C=y
CONFIG_BH1780=y
CONFIG_GP2AP002=y
+CONFIG_TSL2772=y
CONFIG_AK8974=y
CONFIG_IIO_ST_MAGN_3AXIS=y
+# CONFIG_IIO_ST_MAGN_SPI_3AXIS is not set
CONFIG_YAMAHA_YAS530=y
CONFIG_IIO_HRTIMER_TRIGGER=y
CONFIG_IIO_ST_PRESS=y
+# CONFIG_IIO_ST_PRESS_SPI is not set
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -173,10 +185,9 @@ CONFIG_CRYPTO_DEV_UX500_CRYP=y
CONFIG_CRYPTO_DEV_UX500_HASH=y
CONFIG_CRYPTO_DEV_UX500_DEBUG=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_KERNEL=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_FTRACE is not set
CONFIG_DEBUG_USER=y
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 428012687a80..7f7f6bae21c2 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -1101,11 +1101,13 @@ static int __init da850_evm_config_emac(void)
int ret;
u32 val;
struct davinci_soc_info *soc_info = &davinci_soc_info;
- u8 rmii_en = soc_info->emac_pdata->rmii_en;
+ u8 rmii_en;
if (!machine_is_davinci_da850_evm())
return 0;
+ rmii_en = soc_info->emac_pdata->rmii_en;
+
cfg_chip3_base = DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG);
val = __raw_readl(cfg_chip3_base);
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index cc75087134d3..4fa6ea5461b7 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -148,8 +148,10 @@ static struct clk_hw *ep93xx_clk_register_gate(const char *name,
psc->lock = &clk_lock;
clk = clk_register(NULL, &psc->hw);
- if (IS_ERR(clk))
+ if (IS_ERR(clk)) {
kfree(psc);
+ return ERR_CAST(clk);
+ }
return &psc->hw;
}
@@ -207,7 +209,7 @@ static int ep93xx_mux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
unsigned long rate = req->rate;
- struct clk *best_parent = 0;
+ struct clk *best_parent = NULL;
unsigned long __parent_rate;
unsigned long best_rate = 0, actual_rate, mclk_rate;
unsigned long best_parent_rate;
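The first clock.c hunk closes two bugs at once: on clk_register() failure the old code freed psc and then fell through to return &psc->hw, handing the caller a pointer into freed memory instead of an error. The fix frees the allocation and propagates the failure with ERR_CAST(), which re-encodes the errno carried in a struct clk * as the struct clk_hw * this function returns. The second hunk is a plain sparse cleanup: NULL, not integer 0, for a pointer initializer. The resulting error-path pattern, as a sketch:

    clk = clk_register(NULL, &psc->hw);
    if (IS_ERR(clk)) {
        kfree(psc);             /* undo the allocation first */
        return ERR_CAST(clk);   /* same errno, caller's pointer type */
    }
    return &psc->hw;            /* success path */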
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index f7d993628cb7..a9c1efcf7c9c 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -17,7 +17,6 @@ menuconfig ARCH_EXYNOS
select EXYNOS_PMU
select EXYNOS_SROM
select EXYNOS_PM_DOMAINS if PM_GENERIC_DOMAINS
- select GPIOLIB
select HAVE_ARM_ARCH_TIMER if ARCH_EXYNOS5
select HAVE_ARM_SCU if SMP
select PINCTRL
diff --git a/arch/arm/mach-iop32x/cp6.c b/arch/arm/mach-iop32x/cp6.c
index 2882674a1c39..7135a0ac9949 100644
--- a/arch/arm/mach-iop32x/cp6.c
+++ b/arch/arm/mach-iop32x/cp6.c
@@ -7,6 +7,8 @@
#include <asm/traps.h>
#include <asm/ptrace.h>
+#include "iop3xx.h"
+
void iop_enable_cp6(void)
{
u32 temp;
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 5c3845730dbf..0b80f8bcd304 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -314,10 +314,12 @@ void __init omap_gic_of_init(void)
np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-gic");
gic_dist_base_addr = of_iomap(np, 0);
+ of_node_put(np);
WARN_ON(!gic_dist_base_addr);
np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-twd-timer");
twd_base = of_iomap(np, 0);
+ of_node_put(np);
WARN_ON(!twd_base);
skip_errata_init:
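of_find_compatible_node() returns the matched node with its reference count raised, so each lookup must eventually be balanced by of_node_put(); of_iomap() keeps the mapping alive independently of the node reference, which is why the put can follow immediately. (of_node_put(NULL) is a safe no-op, so the not-found case needs no special handling.) Typical shape:

    np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-gic");
    base = of_iomap(np, 0);     /* the mapping outlives the node ref */
    of_node_put(np);            /* drop the lookup's reference */
    WARN_ON(!base);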
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index 1da11bdb1dfb..6e6985e756af 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -122,13 +122,13 @@ static inline bool cluster_is_a15(u32 cluster)
}
/**
- * ve_spc_global_wakeup_irq()
+ * ve_spc_global_wakeup_irq() - sets/clears global wakeup IRQs
+ *
+ * @set: if true, global wake-up IRQs are set, if false they are cleared
*
* Function to set/clear global wakeup IRQs. Not protected by locking since
* it might be used in code paths where normal cacheable locks are not
* working. Locking must be provided by the caller to ensure atomicity.
- *
- * @set: if true, global wake-up IRQs are set, if false they are cleared
*/
void ve_spc_global_wakeup_irq(bool set)
{
@@ -145,15 +145,15 @@ void ve_spc_global_wakeup_irq(bool set)
}
/**
- * ve_spc_cpu_wakeup_irq()
- *
- * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
- * it might be used in code paths where normal cacheable locks are not
- * working. Locking must be provided by the caller to ensure atomicity.
+ * ve_spc_cpu_wakeup_irq() - sets/clears per-CPU wake-up IRQs
*
* @cluster: mpidr[15:8] bitfield describing cluster affinity level
* @cpu: mpidr[7:0] bitfield describing cpu affinity level
* @set: if true, wake-up IRQs are set, if false they are cleared
+ *
+ * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
+ * it might be used in code paths where normal cacheable locks are not
+ * working. Locking must be provided by the caller to ensure atomicity.
*/
void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
{
@@ -200,14 +200,14 @@ void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr)
}
/**
- * ve_spc_powerdown()
+ * ve_spc_powerdown() - enables/disables cluster powerdown
+ *
+ * @cluster: mpidr[15:8] bitfield describing cluster affinity level
+ * @enable: if true enables powerdown, if false disables it
*
* Function to enable/disable cluster powerdown. Not protected by locking
* since it might be used in code paths where normal cacheable locks are not
* working. Locking must be provided by the caller to ensure atomicity.
- *
- * @cluster: mpidr[15:8] bitfield describing cluster affinity level
- * @enable: if true enables powerdown, if false disables it
*/
void ve_spc_powerdown(u32 cluster, bool enable)
{
@@ -228,7 +228,7 @@ static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster)
}
/**
- * ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
+ * ve_spc_cpu_in_wfi() - Checks if the specified CPU is in WFI or not
*
* @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster
* @cluster: mpidr[15:8] bitfield describing cluster affinity level
@@ -580,7 +580,7 @@ static int __init ve_spc_clk_init(void)
}
cluster = topology_physical_package_id(cpu_dev->id);
- if (init_opp_table[cluster])
+ if (cluster < 0 || init_opp_table[cluster])
continue;
if (ve_init_opp_table(cpu_dev))
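The comment shuffling in spc.c is about kernel-doc's expected layout: the opening line is "function_name() - short description", the @parameter lines follow immediately, and the free-form description comes after a blank line; scripts/kernel-doc warns when the blocks are the other way around. Canonical form (the names here are placeholders):

    /**
     * my_func() - one-line summary
     * @arg: what the argument means
     *
     * Free-form description goes after the parameter block.
     */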
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index ec5b082f3de6..07eb69f9e7df 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -337,12 +337,15 @@ int __init arch_xen_unpopulated_init(struct resource **res)
if (!nr_reg) {
pr_err("No extended regions are found\n");
+ of_node_put(np);
return -EINVAL;
}
regs = kcalloc(nr_reg, sizeof(*regs), GFP_KERNEL);
- if (!regs)
+ if (!regs) {
+ of_node_put(np);
return -ENOMEM;
+ }
/*
* Create resource from extended regions provided by the hypervisor to be
@@ -403,8 +406,8 @@ int __init arch_xen_unpopulated_init(struct resource **res)
*res = &xen_resource;
err:
+ of_node_put(np);
kfree(regs);
-
return rc;
}
#endif
@@ -424,8 +427,10 @@ static void __init xen_dt_guest_init(void)
if (of_address_to_resource(xen_node, GRANT_TABLE_INDEX, &res)) {
pr_err("Xen grant table region is not found\n");
+ of_node_put(xen_node);
return;
}
+ of_node_put(xen_node);
xen_grant_frames = res.start;
}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 57c4c995965f..20ea89d9ac2f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -175,8 +175,6 @@ config ARM64
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
- select HAVE_DYNAMIC_FTRACE_WITH_REGS \
- if $(cc-option,-fpatchable-function-entry=2)
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
if DYNAMIC_FTRACE_WITH_REGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -228,6 +226,17 @@ config ARM64
help
ARM 64-bit (AArch64) Linux support.
+config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
+ def_bool CC_IS_CLANG
+ # https://github.com/ClangBuiltLinux/linux/issues/1507
+ depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600))
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS
+
+config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
+ def_bool CC_IS_GCC
+ depends on $(cc-option,-fpatchable-function-entry=2)
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS
+
config 64BIT
def_bool y
@@ -678,7 +687,7 @@ config ARM64_ERRATUM_2051678
default y
help
This options adds the workaround for ARM Cortex-A510 erratum ARM64_ERRATUM_2051678.
- Affected Coretex-A510 might not respect the ordering rules for
+ Affected Cortex-A510 might not respect the ordering rules for
hardware update of the page table's dirty bit. The workaround
is to not enable the feature on affected CPUs.
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi
index d61f43052a34..8e9ad1e51d66 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi
@@ -11,26 +11,6 @@
compatible = "operating-points-v2";
opp-shared;
- opp-100000000 {
- opp-hz = /bits/ 64 <100000000>;
- opp-microvolt = <731000>;
- };
-
- opp-250000000 {
- opp-hz = /bits/ 64 <250000000>;
- opp-microvolt = <731000>;
- };
-
- opp-500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <731000>;
- };
-
- opp-667000000 {
- opp-hz = /bits/ 64 <667000000>;
- opp-microvolt = <731000>;
- };
-
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <761000>;
@@ -71,26 +51,6 @@
compatible = "operating-points-v2";
opp-shared;
- opp-100000000 {
- opp-hz = /bits/ 64 <100000000>;
- opp-microvolt = <731000>;
- };
-
- opp-250000000 {
- opp-hz = /bits/ 64 <250000000>;
- opp-microvolt = <731000>;
- };
-
- opp-500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <731000>;
- };
-
- opp-667000000 {
- opp-hz = /bits/ 64 <667000000>;
- opp-microvolt = <731000>;
- };
-
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <731000>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi
index 1e5d0ee5d541..44c23c984034 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi
@@ -11,26 +11,6 @@
compatible = "operating-points-v2";
opp-shared;
- opp-100000000 {
- opp-hz = /bits/ 64 <100000000>;
- opp-microvolt = <731000>;
- };
-
- opp-250000000 {
- opp-hz = /bits/ 64 <250000000>;
- opp-microvolt = <731000>;
- };
-
- opp-500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <731000>;
- };
-
- opp-667000000 {
- opp-hz = /bits/ 64 <667000000>;
- opp-microvolt = <731000>;
- };
-
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <731000>;
@@ -76,26 +56,6 @@
compatible = "operating-points-v2";
opp-shared;
- opp-100000000 {
- opp-hz = /bits/ 64 <100000000>;
- opp-microvolt = <751000>;
- };
-
- opp-250000000 {
- opp-hz = /bits/ 64 <250000000>;
- opp-microvolt = <751000>;
- };
-
- opp-500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <751000>;
- };
-
- opp-667000000 {
- opp-hz = /bits/ 64 <667000000>;
- opp-microvolt = <751000>;
- };
-
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <771000>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
index bf9ae1e1016b..480afa2cc61f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
@@ -13,28 +13,28 @@
cpu0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a35","arm,armv8";
+ compatible = "arm,cortex-a35";
reg = <0x0 0x0>;
enable-method = "psci";
};
cpu1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a35","arm,armv8";
+ compatible = "arm,cortex-a35";
reg = <0x0 0x1>;
enable-method = "psci";
};
cpu2: cpu@2 {
device_type = "cpu";
- compatible = "arm,cortex-a35","arm,armv8";
+ compatible = "arm,cortex-a35";
reg = <0x0 0x2>;
enable-method = "psci";
};
cpu3: cpu@3 {
device_type = "cpu";
- compatible = "arm,cortex-a35","arm,armv8";
+ compatible = "arm,cortex-a35";
reg = <0x0 0x3>;
enable-method = "psci";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
index 5751c48620ed..cadba194b149 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
@@ -437,6 +437,7 @@
"",
"eMMC_RST#", /* BOOT_12 */
"eMMC_DS", /* BOOT_13 */
+ "", "",
/* GPIOC */
"SD_D0_B", /* GPIOC_0 */
"SD_D1_B", /* GPIOC_1 */
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
index 3c07a89bfd27..80737731af3f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
@@ -95,26 +95,6 @@
compatible = "operating-points-v2";
opp-shared;
- opp-100000000 {
- opp-hz = /bits/ 64 <100000000>;
- opp-microvolt = <730000>;
- };
-
- opp-250000000 {
- opp-hz = /bits/ 64 <250000000>;
- opp-microvolt = <730000>;
- };
-
- opp-500000000 {
- opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <730000>;
- };
-
- opp-667000000 {
- opp-hz = /bits/ 64 <666666666>;
- opp-microvolt = <750000>;
- };
-
opp-1000000000 {
opp-hz = /bits/ 64 <1000000000>;
opp-microvolt = <770000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
index 1dc9d187601c..a0bd540f27d3 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
@@ -89,12 +89,12 @@
pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <125>;
- touchscreen-size-x = /bits/ 16 <4008>;
+ touchscreen-size-x = <4008>;
ti,y-min = /bits/ 16 <282>;
- touchscreen-size-y = /bits/ 16 <3864>;
+ touchscreen-size-y = <3864>;
ti,x-plate-ohms = /bits/ 16 <180>;
- touchscreen-max-pressure = /bits/ 16 <255>;
- touchscreen-average-samples = /bits/ 16 <10>;
+ touchscreen-max-pressure = <255>;
+ touchscreen-average-samples = <10>;
ti,debounce-tol = /bits/ 16 <3>;
ti,debounce-rep = /bits/ 16 <1>;
ti,settle-delay-usec = /bits/ 16 <150>;
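Cells in a DTS property are 32-bit unless a /bits/ prefix narrows them. The generic touchscreen-* properties are parsed as u32, so encoding them as /bits/ 16 produced a property the u32 read rejects for being the wrong length; the vendor ti,* properties really are read as u16 and keep the prefix (the same fix repeats below for the imx8mn variant). Side by side:

    touchscreen-size-x = <4008>;            /* default 32-bit cell, read as u32 */
    ti,x-plate-ohms = /bits/ 16 <180>;      /* driver reads this one as u16 */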
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
index 73addc0b8e57..cce55c3c5df0 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
@@ -146,12 +146,14 @@
&usbotg1 {
dr_mode = "otg";
+ over-current-active-low;
vbus-supply = <&reg_usb_otg1_vbus>;
status = "okay";
};
&usbotg2 {
dr_mode = "host";
+ disable-over-current;
status = "okay";
};
@@ -215,7 +217,7 @@
fsl,pins = <
MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6
- MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
+ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6
MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
index 1e7badb2a82e..f61e4847fa49 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
@@ -211,12 +211,14 @@
&usbotg1 {
dr_mode = "otg";
+ over-current-active-low;
vbus-supply = <&reg_usb_otg1_vbus>;
status = "okay";
};
&usbotg2 {
dr_mode = "host";
+ disable-over-current;
vbus-supply = <&reg_usb_otg2_vbus>;
status = "okay";
};
@@ -309,7 +311,7 @@
fsl,pins = <
MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6
- MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
+ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6
MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
index 426483ec1f88..023619648966 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
@@ -238,12 +238,14 @@
&usbotg1 {
dr_mode = "otg";
+ over-current-active-low;
vbus-supply = <&reg_usb_otg1_vbus>;
status = "okay";
};
&usbotg2 {
dr_mode = "host";
+ disable-over-current;
vbus-supply = <&reg_usb_otg2_vbus>;
status = "okay";
};
@@ -358,7 +360,7 @@
fsl,pins = <
MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6
- MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
+ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6
MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
index 7dfee715a2c4..d8ce217c6016 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
@@ -59,6 +59,10 @@
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
rohm,reset-snvs-powered;
+ #clock-cells = <0>;
+ clocks = <&osc_32k 0>;
+ clock-output-names = "clk-32k-out";
+
regulators {
buck1_reg: BUCK1 {
regulator-name = "buck1";
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
index b16c7caf34c1..87b5e23c766f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
@@ -70,12 +70,12 @@
pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
ti,x-min = /bits/ 16 <125>;
- touchscreen-size-x = /bits/ 16 <4008>;
+ touchscreen-size-x = <4008>;
ti,y-min = /bits/ 16 <282>;
- touchscreen-size-y = /bits/ 16 <3864>;
+ touchscreen-size-y = <3864>;
ti,x-plate-ohms = /bits/ 16 <180>;
- touchscreen-max-pressure = /bits/ 16 <255>;
- touchscreen-average-samples = /bits/ 16 <10>;
+ touchscreen-max-pressure = <255>;
+ touchscreen-average-samples = <10>;
ti,debounce-tol = /bits/ 16 <3>;
ti,debounce-rep = /bits/ 16 <1>;
ti,settle-delay-usec = /bits/ 16 <150>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index 99f0f5026674..5c0ca2490561 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -293,7 +293,7 @@
ranges;
sai2: sai@30020000 {
- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
+ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x30020000 0x10000>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI2_IPG>,
@@ -307,7 +307,7 @@
};
sai3: sai@30030000 {
- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
+ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x30030000 0x10000>;
interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI3_IPG>,
@@ -321,7 +321,7 @@
};
sai5: sai@30050000 {
- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
+ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x30050000 0x10000>;
interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI5_IPG>,
@@ -337,7 +337,7 @@
};
sai6: sai@30060000 {
- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
+ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x30060000 0x10000>;
interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI6_IPG>,
@@ -394,7 +394,7 @@
};
sai7: sai@300b0000 {
- compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai";
+ compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x300b0000 0x10000>;
interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI7_IPG>,
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
index 38ffcd145b33..899e8e7dbc24 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
@@ -253,7 +253,7 @@
#address-cells = <1>;
#size-cells = <1>;
spi-max-frequency = <84000000>;
- spi-tx-bus-width = <4>;
+ spi-tx-bus-width = <1>;
spi-rx-bus-width = <4>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qm.dtsi b/arch/arm64/boot/dts/freescale/imx8qm.dtsi
index be8c76a0554c..4f767012f1f5 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qm.dtsi
@@ -196,7 +196,7 @@
};
clk: clock-controller {
- compatible = "fsl,imx8qxp-clk", "fsl,scu-clk";
+ compatible = "fsl,imx8qm-clk", "fsl,scu-clk";
#clock-cells = <2>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index f0f81c23c16f..b9a48cfd760f 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -1249,14 +1249,14 @@
pins = "gpio47", "gpio48";
function = "blsp_i2c3";
drive-strength = <16>;
- bias-disable = <0>;
+ bias-disable;
};
blsp1_i2c3_sleep: blsp1-i2c2-sleep {
pins = "gpio47", "gpio48";
function = "gpio";
drive-strength = <2>;
- bias-disable = <0>;
+ bias-disable;
};
blsp2_uart3_4pins_default: blsp2-uart2-4pins {
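In the device tree, pinconf booleans such as bias-disable are presence-only: writing "bias-disable = <0>;" still makes the property present, so the old form enabled the setting while reading as if it disabled it. On the consumer side this style of boolean is typically read with of_property_read_bool(), which only tests existence. A minimal kernel-side sketch (the example_ helper is invented for illustration):

#include <linux/of.h>

/* Boolean DT properties are tested for presence only: this returns
 * true for both "bias-disable;" and the bogus "bias-disable = <0>;",
 * which is exactly why the old form was misleading. */
static bool example_bias_disabled(const struct device_node *np)
{
	return of_property_read_bool(np, "bias-disable");
}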
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
index e90f99ef5323..e47c74e513af 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
@@ -33,7 +33,7 @@ ap_h1_spi: &spi0 {};
};
&alc5682 {
- realtek,dmic-clk-driving-high = "true";
+ realtek,dmic-clk-driving-high;
};
&cpu6_alert0 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
index 1084d5ce9ac7..07b729f9fec5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
@@ -630,7 +630,7 @@
pins = "gpio6", "gpio25", "gpio26";
function = "gpio";
drive-strength = <8>;
- bias-disable = <0>;
+ bias-disable;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts b/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
index 8553c8bf79bd..103cc40816fd 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
@@ -563,7 +563,7 @@
config {
pins = "gpio6", "gpio11";
drive-strength = <8>;
- bias-disable = <0>;
+ bias-disable;
};
};
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index d62405ce3e6d..f71358271b71 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -40,13 +40,26 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
+#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.hcr_el2 & HCR_RW);
}
+#else
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED,
+ &kvm->arch.flags));
+
+ return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
+}
+#endif
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
@@ -72,15 +85,14 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 |= HCR_TVM;
}
- if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+ if (vcpu_el1_is_32bit(vcpu))
vcpu->arch.hcr_el2 &= ~HCR_RW;
-
- /*
- * TID3: trap feature register accesses that we virtualise.
- * For now this is conditional, since no AArch32 feature regs
- * are currently virtualised.
- */
- if (!vcpu_el1_is_32bit(vcpu))
+ else
+ /*
+ * TID3: trap feature register accesses that we virtualise.
+ * For now this is conditional, since no AArch32 feature regs
+ * are currently virtualised.
+ */
vcpu->arch.hcr_el2 |= HCR_TID3;
if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e3b25dc6c367..94a27a7520f4 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -127,6 +127,16 @@ struct kvm_arch {
#define KVM_ARCH_FLAG_MTE_ENABLED 1
/* At least one vCPU has ran in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE 2
+ /*
+ * The following two bits are used to indicate the guest's EL1
+ * register width configuration. The KVM_ARCH_FLAG_EL1_32BIT
+ * bit is valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set.
+ * Otherwise, the guest's EL1 register width has not yet been
+ * determined.
+ */
+#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED 3
+#define KVM_ARCH_FLAG_EL1_32BIT 4
+
unsigned long flags;
/*
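The two new flags form a configured/value pair: KVM_ARCH_FLAG_EL1_32BIT carries no meaning until KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set. A minimal sketch of the read side, mirroring the non-hypervisor vcpu_el1_is_32bit() added in kvm_emulate.h (the example_ name is invented; test_bit() and the flag bits are the real ones):

/* Sketch: only trust EL1_32BIT once the width has been configured. */
static bool example_guest_el1_is_32bit(struct kvm *kvm)
{
	if (!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags))
		return false;	/* width not determined yet; assume 64-bit */

	return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
}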
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 94e147e5456c..dff2b483ea50 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -535,7 +535,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
PMD_TYPE_TABLE)
#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_SECT)
-#define pmd_leaf(pmd) pmd_sect(pmd)
+#define pmd_leaf(pmd) (pmd_present(pmd) && !pmd_table(pmd))
#define pmd_bad(pmd) (!pmd_table(pmd))
#define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
@@ -625,7 +625,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!pud_table(pud))
#define pud_present(pud) pte_present(pud_pte(pud))
-#define pud_leaf(pud) pud_sect(pud)
+#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud))
#define pud_valid(pud) pte_valid(pud_pte(pud))
static inline void set_pud(pud_t *pudp, pud_t pud)
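The redefinition makes "leaf" mean exactly "present and not a table pointer" rather than relying on the section type bits alone. A hedged sketch of the predicate as a walker would use it (the example_ function is invented; pmd_present() and pmd_table() are the real predicates from this header):

/* Sketch: a PMD maps memory directly (is a leaf) only if it is
 * present and does not point at a next-level page table. */
static bool example_pmd_is_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && !pmd_table(pmd);
}

Compared with the old pmd_sect()-based definition, this also counts present-but-invalid encodings (such as PROT_NONE huge mappings) as leaves, which appears to be the point of the change.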
diff --git a/arch/arm64/kernel/elfcore.c b/arch/arm64/kernel/elfcore.c
index 2b3f3d0544b9..98d67444a5b6 100644
--- a/arch/arm64/kernel/elfcore.c
+++ b/arch/arm64/kernel/elfcore.c
@@ -95,7 +95,7 @@ int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
for_each_mte_vma(current, vma) {
struct elf_phdr phdr;
- phdr.p_type = PT_ARM_MEMTAG_MTE;
+ phdr.p_type = PT_AARCH64_MEMTAG_MTE;
phdr.p_offset = offset;
phdr.p_vaddr = vma->vm_start;
phdr.p_paddr = 0;
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 3d613e721a75..727c979b2b69 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -198,15 +198,15 @@ SYM_CODE_START(__kvm_hyp_host_vector)
invalid_host_el2_vect // FIQ EL2h
invalid_host_el2_vect // Error EL2h
- host_el1_sync_vect // Synchronous 64-bit EL1
- invalid_host_el1_vect // IRQ 64-bit EL1
- invalid_host_el1_vect // FIQ 64-bit EL1
- invalid_host_el1_vect // Error 64-bit EL1
-
- invalid_host_el1_vect // Synchronous 32-bit EL1
- invalid_host_el1_vect // IRQ 32-bit EL1
- invalid_host_el1_vect // FIQ 32-bit EL1
- invalid_host_el1_vect // Error 32-bit EL1
+ host_el1_sync_vect // Synchronous 64-bit EL1/EL0
+ invalid_host_el1_vect // IRQ 64-bit EL1/EL0
+ invalid_host_el1_vect // FIQ 64-bit EL1/EL0
+ invalid_host_el1_vect // Error 64-bit EL1/EL0
+
+ host_el1_sync_vect // Synchronous 32-bit EL1/EL0
+ invalid_host_el1_vect // IRQ 32-bit EL1/EL0
+ invalid_host_el1_vect // FIQ 32-bit EL1/EL0
+ invalid_host_el1_vect // Error 32-bit EL1/EL0
SYM_CODE_END(__kvm_hyp_host_vector)
/*
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index b47df73e98d7..ba20405d2dc2 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -145,6 +145,34 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
inject_abt64(vcpu, true, addr);
}
+void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
+{
+ unsigned long addr, esr;
+
+ addr = kvm_vcpu_get_fault_ipa(vcpu);
+ addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+ if (kvm_vcpu_trap_is_iabt(vcpu))
+ kvm_inject_pabt(vcpu, addr);
+ else
+ kvm_inject_dabt(vcpu, addr);
+
+ /*
+ * If AArch64 or LPAE, set FSC to 0 to indicate an Address
+ * Size Fault at level 0, as if exceeding PARange.
+ *
+ * Non-LPAE guests will only get the external abort, as there
+ * is no way to describe the ASF.
+ */
+ if (vcpu_el1_is_32bit(vcpu) &&
+ !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
+ return;
+
+ esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
+ esr &= ~GENMASK_ULL(5, 0);
+ vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
+}
+
/**
* kvm_inject_undefined - inject an undefined instruction into the guest
* @vcpu: The vCPU in which to inject the exception
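For reference on the mask above: GENMASK_ULL(5, 0) is 0x3f, selecting ESR_ELx[5:0], the fault status code (FSC) field, and an FSC of 0 encodes an Address Size Fault at level 0. A tiny self-contained illustration in plain C (the ESR value is an invented example):

#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	uint64_t esr = 0x96000045ULL;	/* invented data-abort ESR */

	esr &= ~GENMASK_ULL(5, 0);	/* FSC <- 0: size fault, level 0 */
	printf("patched ESR: %#llx\n", (unsigned long long)esr);
	return 0;
}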
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 0d19259454d8..5400fc020164 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1079,7 +1079,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
gfn_t gfn;
kvm_pfn_t pfn;
bool logging_active = memslot_is_logging(memslot);
- bool logging_perm_fault = false;
+ bool use_read_lock = false;
unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
unsigned long vma_pagesize, fault_granule;
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
@@ -1114,7 +1114,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (logging_active) {
force_pte = true;
vma_shift = PAGE_SHIFT;
- logging_perm_fault = (fault_status == FSC_PERM && write_fault);
+ use_read_lock = (fault_status == FSC_PERM && write_fault &&
+ fault_granule == PAGE_SIZE);
} else {
vma_shift = get_vma_page_shift(vma, hva);
}
@@ -1218,7 +1219,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* To reduce MMU contentions and enhance concurrency during dirty logging, only acquire read lock for permission
* relaxation.
*/
- if (logging_perm_fault)
+ if (use_read_lock)
read_lock(&kvm->mmu_lock);
else
write_lock(&kvm->mmu_lock);
@@ -1268,6 +1269,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
} else {
+ WARN_ONCE(use_read_lock, "Attempted stage-2 map outside of write lock\n");
+
ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
__pfn_to_phys(pfn), prot,
memcache);
@@ -1280,7 +1283,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
}
out_unlock:
- if (logging_perm_fault)
+ if (use_read_lock)
read_unlock(&kvm->mmu_lock);
else
write_unlock(&kvm->mmu_lock);
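The rename also documents the rule the WARN_ONCE above enforces: the read lock is only safe when the fault merely relaxes permissions on an existing PAGE_SIZE leaf; anything that might install a new mapping must hold the write lock. A condensed sketch of the pattern (example_ name invented; mmu_lock is an rwlock here):

static void example_handle_fault(struct kvm *kvm, bool relax_only)
{
	if (relax_only) {
		read_lock(&kvm->mmu_lock);
		/* update permission bits on an existing leaf entry */
		read_unlock(&kvm->mmu_lock);
	} else {
		write_lock(&kvm->mmu_lock);
		/* may allocate and install new table or leaf entries */
		write_unlock(&kvm->mmu_lock);
	}
}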
@@ -1334,6 +1337,25 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
+ if (fault_status == FSC_FAULT) {
+ /* Beyond sanitised PARange (which is the IPA limit) */
+ if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
+ kvm_inject_size_fault(vcpu);
+ return 1;
+ }
+
+ /* Falls between the IPA range and the PARange? */
+ if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) {
+ fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
+
+ if (is_iabt)
+ kvm_inject_pabt(vcpu, fault_ipa);
+ else
+ kvm_inject_dabt(vcpu, fault_ipa);
+ return 1;
+ }
+ }
+
/* Synchronous External Abort? */
if (kvm_vcpu_abt_issea(vcpu)) {
/*
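The two new checks split a translation fault's IPA into three bands: inside the VM's configured IPA space (handled normally), between the VM's IPA size and the host's sanitised PARange (plain external abort), and at or beyond PARange (the new size fault). A sketch of that classification, with invented example_ names:

enum example_ipa_band { IPA_IN_RANGE, IPA_ABOVE_VM, IPA_ABOVE_PARANGE };

static enum example_ipa_band example_classify_ipa(u64 ipa, u32 vm_ia_bits,
						  u32 parange_bits)
{
	if (ipa >= BIT_ULL(parange_bits))
		return IPA_ABOVE_PARANGE;	/* kvm_inject_size_fault() */
	if (ipa >= BIT_ULL(vm_ia_bits))
		return IPA_ABOVE_VM;		/* external abort */
	return IPA_IN_RANGE;			/* normal fault handling */
}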
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 78fdc443adc7..3dc990ac4f44 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -177,6 +177,9 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
struct kvm_pmu *pmu = &vcpu->arch.pmu;
struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+ if (!kvm_vcpu_has_pmu(vcpu))
+ return 0;
+
counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
if (kvm_pmu_pmc_is_chained(pmc) &&
@@ -198,6 +201,9 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
u64 reg;
+ if (!kvm_vcpu_has_pmu(vcpu))
+ return;
+
reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
@@ -322,6 +328,9 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
struct kvm_pmu *pmu = &vcpu->arch.pmu;
struct kvm_pmc *pmc;
+ if (!kvm_vcpu_has_pmu(vcpu))
+ return;
+
if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
return;
@@ -357,7 +366,7 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
struct kvm_pmu *pmu = &vcpu->arch.pmu;
struct kvm_pmc *pmc;
- if (!val)
+ if (!kvm_vcpu_has_pmu(vcpu) || !val)
return;
for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
@@ -527,6 +536,9 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
struct kvm_pmu *pmu = &vcpu->arch.pmu;
int i;
+ if (!kvm_vcpu_has_pmu(vcpu))
+ return;
+
if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
return;
@@ -576,6 +588,9 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
int i;
+ if (!kvm_vcpu_has_pmu(vcpu))
+ return;
+
if (val & ARMV8_PMU_PMCR_E) {
kvm_pmu_enable_counter_mask(vcpu,
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
@@ -739,6 +754,9 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
{
u64 reg, mask;
+ if (!kvm_vcpu_has_pmu(vcpu))
+ return;
+
mask = ARMV8_PMU_EVTYPE_MASK;
mask &= ~ARMV8_PMU_EVTYPE_EVENT;
mask |= kvm_pmu_event_mask(vcpu->kvm);
@@ -827,6 +845,9 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
u64 val, mask = 0;
int base, i, nr_events;
+ if (!kvm_vcpu_has_pmu(vcpu))
+ return 0;
+
if (!pmceid1) {
val = read_sysreg(pmceid0_el0);
base = 0;
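Each emulation entry point now bails out first when the vCPU has no PMU, so the rest of the body can assume PMU state exists instead of checking piecemeal. The guard-clause shape, as a hedged sketch (the example_ accessor is invented; kvm_vcpu_has_pmu() and __vcpu_sys_reg() are real):

static u64 example_pmu_counter_read(struct kvm_vcpu *vcpu, u64 idx)
{
	/* No PMU: counters read as zero, writes are dropped. */
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	return __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + idx);
}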
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 372da09a2fab..708d80e8e60d 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -181,7 +181,8 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type, u64 flags)
memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
vcpu->run->system_event.type = type;
- vcpu->run->system_event.flags = flags;
+ vcpu->run->system_event.ndata = 1;
+ vcpu->run->system_event.data[0] = flags;
vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
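run->system_event grows from a single flags word to an ndata-counted data[] array; data[0] carries what flags used to, so old consumers keep working. A hedged userspace-side sketch of consuming the exit (requires kernel headers that already have the ndata/data[] layout):

#include <stdio.h>
#include <linux/kvm.h>

/* Called after KVM_RUN returns; run points at the mmap'ed vcpu
 * run structure. */
static void example_handle_system_event(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_SYSTEM_EVENT)
		return;

	printf("system event type %u, %u data words\n",
	       run->system_event.type, run->system_event.ndata);
	for (unsigned int i = 0; i < run->system_event.ndata; i++)
		printf("  data[%u] = %llu\n", i,
		       (unsigned long long)run->system_event.data[i]);
}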
@@ -215,15 +216,11 @@ static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu)
static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 fn)
{
- switch(fn) {
- case PSCI_0_2_FN64_CPU_SUSPEND:
- case PSCI_0_2_FN64_CPU_ON:
- case PSCI_0_2_FN64_AFFINITY_INFO:
- /* Disallow these functions for 32bit guests */
- if (vcpu_mode_is_32bit(vcpu))
- return PSCI_RET_NOT_SUPPORTED;
- break;
- }
+ /*
+ * Prevent 32-bit guests from calling 64-bit PSCI functions.
+ */
+ if ((fn & PSCI_0_2_64BIT) && vcpu_mode_is_32bit(vcpu))
+ return PSCI_RET_NOT_SUPPORTED;
return 0;
}
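This works because SMCCC function IDs encode the calling convention in bit 30, and PSCI_0_2_64BIT (0x40000000 in include/uapi/linux/psci.h) is exactly that bit, so one mask test covers every current and future 64-bit PSCI function. A one-liner sketch (example_ name invented):

/* Bit 30 set means the SMC64/HVC64 calling convention. */
static bool example_psci_fn_is_64bit(u32 fn)
{
	return !!(fn & PSCI_0_2_64BIT);
}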
@@ -235,10 +232,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
unsigned long val;
int ret = 1;
- val = kvm_psci_check_allowed_function(vcpu, psci_fn);
- if (val)
- goto out;
-
switch (psci_fn) {
case PSCI_0_2_FN_PSCI_VERSION:
/*
@@ -306,7 +299,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
break;
}
-out:
smccc_set_retval(vcpu, val, 0, 0, 0);
return ret;
}
@@ -318,9 +310,6 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
unsigned long val;
int ret = 1;
- if (minor > 1)
- return -EINVAL;
-
switch(psci_fn) {
case PSCI_0_2_FN_PSCI_VERSION:
val = minor == 0 ? KVM_ARM_PSCI_1_0 : KVM_ARM_PSCI_1_1;
@@ -426,6 +415,15 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
*/
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
+ u32 psci_fn = smccc_get_function(vcpu);
+ unsigned long val;
+
+ val = kvm_psci_check_allowed_function(vcpu, psci_fn);
+ if (val) {
+ smccc_set_retval(vcpu, val, 0, 0, 0);
+ return 1;
+ }
+
switch (kvm_psci_version(vcpu)) {
case KVM_ARM_PSCI_1_1:
return kvm_psci_1_x_call(vcpu, 1);
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index ecc40c8cd6f6..6c70c6f61c70 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -181,27 +181,51 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
return 0;
}
-static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+/**
+ * kvm_set_vm_width() - set the register width for the guest
+ * @vcpu: Pointer to the vcpu being configured
+ *
+ * Set both KVM_ARCH_FLAG_EL1_32BIT and KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED
+ * in the VM flags based on the vcpu's requested register width, the HW
+ * capabilities and other options (such as MTE).
+ * When REG_WIDTH_CONFIGURED is already set, the vcpu settings must be
+ * consistent with the value of the FLAG_EL1_32BIT bit in the flags.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *tmp;
+ struct kvm *kvm = vcpu->kvm;
bool is32bit;
- unsigned long i;
is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+
+ lockdep_assert_held(&kvm->lock);
+
+ if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
+ /*
+ * The guest's register width is already configured.
+ * Make sure that the vcpu is consistent with it.
+ */
+ if (is32bit == test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags))
+ return 0;
+
+ return -EINVAL;
+ }
+
if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
- return false;
+ return -EINVAL;
/* MTE is incompatible with AArch32 */
- if (kvm_has_mte(vcpu->kvm) && is32bit)
- return false;
+ if (kvm_has_mte(kvm) && is32bit)
+ return -EINVAL;
- /* Check that the vcpus are either all 32bit or all 64bit */
- kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
- if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
- return false;
- }
+ if (is32bit)
+ set_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
- return true;
+ set_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags);
+
+ return 0;
}
/**
@@ -230,10 +254,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
u32 pstate;
mutex_lock(&vcpu->kvm->lock);
- reset_state = vcpu->arch.reset_state;
- WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+ ret = kvm_set_vm_width(vcpu);
+ if (!ret) {
+ reset_state = vcpu->arch.reset_state;
+ WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+ }
mutex_unlock(&vcpu->kvm->lock);
+ if (ret)
+ return ret;
+
/* Reset PMU outside of the non-preemptible section */
kvm_pmu_vcpu_reset(vcpu);
@@ -260,14 +290,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
}
}
- if (!vcpu_allowed_register_width(vcpu)) {
- ret = -EINVAL;
- goto out;
- }
-
switch (vcpu->arch.target) {
default:
- if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
+ if (vcpu_el1_is_32bit(vcpu)) {
pstate = VCPU_RESET_PSTATE_SVC;
} else {
pstate = VCPU_RESET_PSTATE_EL1;
diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c
index f38c40a76251..78cde687383c 100644
--- a/arch/arm64/kvm/vgic/vgic-debug.c
+++ b/arch/arm64/kvm/vgic/vgic-debug.c
@@ -82,7 +82,7 @@ static bool end_of_vgic(struct vgic_state_iter *iter)
static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
{
- struct kvm *kvm = (struct kvm *)s->private;
+ struct kvm *kvm = s->private;
struct vgic_state_iter *iter;
mutex_lock(&kvm->lock);
@@ -110,7 +110,7 @@ out:
static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
{
- struct kvm *kvm = (struct kvm *)s->private;
+ struct kvm *kvm = s->private;
struct vgic_state_iter *iter = kvm->arch.vgic.iter;
++*pos;
@@ -122,7 +122,7 @@ static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
static void vgic_debug_stop(struct seq_file *s, void *v)
{
- struct kvm *kvm = (struct kvm *)s->private;
+ struct kvm *kvm = s->private;
struct vgic_state_iter *iter;
/*
@@ -229,8 +229,8 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
static int vgic_debug_show(struct seq_file *s, void *v)
{
- struct kvm *kvm = (struct kvm *)s->private;
- struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
+ struct kvm *kvm = s->private;
+ struct vgic_state_iter *iter = v;
struct vgic_irq *irq;
struct kvm_vcpu *vcpu = NULL;
unsigned long flags;
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 089fc2ffcb43..2e13402be3bd 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -2143,7 +2143,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
void *ptr, void *opaque)
{
- struct its_device *dev = (struct its_device *)opaque;
+ struct its_device *dev = opaque;
struct its_collection *collection;
struct kvm *kvm = its->dev->kvm;
struct kvm_vcpu *vcpu = NULL;
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 40a583e9d3c7..97a76a8619fb 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -101,7 +101,7 @@ __module_alloc(unsigned long size, unsigned long start, unsigned long end, bool
* too.
*/
return __vmalloc_node_range(size, 1, start, end, gfp, prot,
- VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP,
+ VM_FLUSH_RESET_PERMS,
NUMA_NO_NODE, __builtin_return_address(0));
}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index f5cbfe5efd25..f80cce0e3899 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -615,23 +615,22 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
return;
}
- /* Conditionally hard-enable interrupts. */
- if (should_hard_irq_enable()) {
- /*
- * Ensure a positive value is written to the decrementer, or
- * else some CPUs will continue to take decrementer exceptions.
- * When the PPC_WATCHDOG (decrementer based) is configured,
- * keep this at most 31 bits, which is about 4 seconds on most
- * systems, which gives the watchdog a chance of catching timer
- * interrupt hard lockups.
- */
- if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
- set_dec(0x7fffffff);
- else
- set_dec(decrementer_max);
+ /*
+ * Ensure a positive value is written to the decrementer, or
+ * else some CPUs will continue to take decrementer exceptions.
+ * When the PPC_WATCHDOG (decrementer based) is configured,
+ * keep this at most 31 bits, which is about 4 seconds on most
+ * systems, which gives the watchdog a chance of catching timer
+ * interrupt hard lockups.
+ */
+ if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
+ set_dec(0x7fffffff);
+ else
+ set_dec(decrementer_max);
+ /* Conditionally hard-enable interrupts. */
+ if (should_hard_irq_enable())
do_hard_irq_enable();
- }
#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
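The 31-bit cap is easy to sanity-check: with the 512 MHz timebase typical of recent Power machines, 0x7fffffff ticks is 2147483647 / 512000000, about 4.19 seconds, matching the "about 4 seconds" in the comment. A worked one-liner in plain C (timebase value illustrative):

#include <stdio.h>

int main(void)
{
	const unsigned long long ticks = 0x7fffffff;	/* 31-bit cap */
	const unsigned long long tb_hz = 512000000ULL;	/* typical timebase */

	printf("max decrementer interval: %.2f s\n",
	       (double)ticks / (double)tb_hz);		/* ~4.19 s */
	return 0;
}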
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index e4ce2a35483f..42851c32ff3b 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -168,9 +168,10 @@ int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
return -EINVAL;
/* Read the entry from guest memory */
addr = base + (index * sizeof(rpte));
- vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+
+ kvm_vcpu_srcu_read_lock(vcpu);
ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
- srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (ret) {
if (pte_ret_p)
*pte_ret_p = addr;
@@ -246,9 +247,9 @@ int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Read the table to find the root of the radix tree */
ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
- vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
- srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (ret)
return ret;
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index d42b4b6d4a79..85cfa6328222 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -420,13 +420,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
tbl[idx % TCES_PER_PAGE] = tce;
}
-static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
- unsigned long entry)
+static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
+ struct iommu_table *tbl, unsigned long entry)
{
- unsigned long hpa = 0;
- enum dma_data_direction dir = DMA_NONE;
+ unsigned long i;
+ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
+
+ for (i = 0; i < subpages; ++i) {
+ unsigned long hpa = 0;
+ enum dma_data_direction dir = DMA_NONE;
- iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
+ iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
+ }
}
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
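When the guest-visible TCE page (stt->page_shift, e.g. 64 KiB) is bigger than the IOMMU page (tbl->it_page_shift, e.g. 4 KiB), one guest entry covers several hardware entries; the loop above derives the count and base index from the shift difference. A worked sketch of the arithmetic in plain C (all values illustrative):

#include <stdio.h>

int main(void)
{
	const unsigned int stt_page_shift = 16;	/* 64 KiB guest TCE page */
	const unsigned int it_page_shift = 12;	/* 4 KiB IOMMU page */
	const unsigned long entry = 3;		/* invented guest entry */

	unsigned long subpages = 1UL << (stt_page_shift - it_page_shift);
	unsigned long io_entry = entry << (stt_page_shift - it_page_shift);

	/* Guest entry 3 clears hardware entries 48..63. */
	printf("entry %lu -> io entries %lu..%lu (%lu subpages)\n",
	       entry, io_entry, io_entry + subpages - 1, subpages);
	return 0;
}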
@@ -485,6 +491,8 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
break;
}
+ iommu_tce_kill(tbl, io_entry, subpages);
+
return ret;
}
@@ -544,6 +552,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm,
break;
}
+ iommu_tce_kill(tbl, io_entry, subpages);
+
return ret;
}
@@ -590,10 +600,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
entry, ua, dir);
- iommu_tce_kill(stit->tbl, entry, 1);
if (ret != H_SUCCESS) {
- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
+ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
goto unlock_exit;
}
}
@@ -669,13 +678,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
*/
if (get_user(tce, tces + i)) {
ret = H_TOO_HARD;
- goto invalidate_exit;
+ goto unlock_exit;
}
tce = be64_to_cpu(tce);
if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
ret = H_PARAMETER;
- goto invalidate_exit;
+ goto unlock_exit;
}
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -684,19 +693,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
iommu_tce_direction(tce));
if (ret != H_SUCCESS) {
- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
- entry);
- goto invalidate_exit;
+ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
+ entry + i);
+ goto unlock_exit;
}
}
kvmppc_tce_put(stt, entry + i, tce);
}
-invalidate_exit:
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
- iommu_tce_kill(stit->tbl, entry, npages);
-
unlock_exit:
srcu_read_unlock(&vcpu->kvm->srcu, idx);
@@ -735,20 +740,16 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
continue;
if (ret == H_TOO_HARD)
- goto invalidate_exit;
+ return ret;
WARN_ON_ONCE(1);
- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
+ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
}
}
for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
-invalidate_exit:
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
- iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
-
return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 870b7f0c7ea5..fdeda6a9cff4 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -247,13 +247,19 @@ static void iommu_tce_kill_rm(struct iommu_table *tbl,
tbl->it_ops->tce_kill(tbl, entry, pages, true);
}
-static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
- unsigned long entry)
+static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
+ struct iommu_table *tbl, unsigned long entry)
{
- unsigned long hpa = 0;
- enum dma_data_direction dir = DMA_NONE;
+ unsigned long i;
+ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
+
+ for (i = 0; i < subpages; ++i) {
+ unsigned long hpa = 0;
+ enum dma_data_direction dir = DMA_NONE;
- iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
+ iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
+ }
}
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -316,6 +322,8 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
break;
}
+ iommu_tce_kill_rm(tbl, io_entry, subpages);
+
return ret;
}
@@ -379,6 +387,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
break;
}
+ iommu_tce_kill_rm(tbl, io_entry, subpages);
+
return ret;
}
@@ -420,10 +430,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
stit->tbl, entry, ua, dir);
- iommu_tce_kill_rm(stit->tbl, entry, 1);
-
if (ret != H_SUCCESS) {
- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
return ret;
}
}
@@ -561,7 +569,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
ua = 0;
if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
ret = H_PARAMETER;
- goto invalidate_exit;
+ goto unlock_exit;
}
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -570,19 +578,15 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
iommu_tce_direction(tce));
if (ret != H_SUCCESS) {
- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
- entry);
- goto invalidate_exit;
+ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
+ entry + i);
+ goto unlock_exit;
}
}
kvmppc_rm_tce_put(stt, entry + i, tce);
}
-invalidate_exit:
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
- iommu_tce_kill_rm(stit->tbl, entry, npages);
-
unlock_exit:
if (!prereg)
arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
@@ -620,20 +624,16 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
continue;
if (ret == H_TOO_HARD)
- goto invalidate_exit;
+ return ret;
WARN_ON_ONCE_RM(1);
- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+ kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
}
}
for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
-invalidate_exit:
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
- iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
-
return ret;
}
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 9d373f8963ee..c943a051c6e7 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -306,10 +306,10 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
/* copy parameters in */
hv_ptr = kvmppc_get_gpr(vcpu, 4);
regs_ptr = kvmppc_get_gpr(vcpu, 5);
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
hv_ptr, regs_ptr);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (err)
return H_PARAMETER;
@@ -410,10 +410,10 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
byteswap_hv_regs(&l2_hv);
byteswap_pt_regs(&l2_regs);
}
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
hv_ptr, regs_ptr);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (err)
return H_AUTHORITY;
@@ -600,16 +600,16 @@ long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
goto not_found;
/* Write what was loaded into our buffer back to the L1 guest */
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (rc)
goto not_found;
} else {
/* Load the data to be stored from the L1 guest into our buf */
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (rc)
goto not_found;
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index 0f847f1e5ddd..6808bda0dbc1 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -229,9 +229,9 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
*/
args_phys = kvmppc_get_gpr(vcpu, 4) & KVM_PAM;
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
rc = kvm_read_guest(vcpu->kvm, args_phys, &args, sizeof(args));
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (rc)
goto fail;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 875c30c12db0..533c4232e5ab 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -425,9 +425,9 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
return EMULATE_DONE;
}
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (rc)
return EMULATE_DO_MMIO;
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
index 2f46e31c7612..4f53d0b97539 100644
--- a/arch/powerpc/perf/Makefile
+++ b/arch/powerpc/perf/Makefile
@@ -3,11 +3,11 @@
obj-y += callchain.o callchain_$(BITS).o perf_regs.o
obj-$(CONFIG_COMPAT) += callchain_32.o
-obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o
+obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o
obj64-$(CONFIG_PPC_PERF_CTRS) += ppc970-pmu.o power5-pmu.o \
power5+-pmu.o power6-pmu.o power7-pmu.o \
isa207-common.o power8-pmu.o power9-pmu.o \
- generic-compat-pmu.o power10-pmu.o
+ generic-compat-pmu.o power10-pmu.o bhrb.o
obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
obj-$(CONFIG_PPC_POWERNV) += imc-pmu.o
diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c
index d3398100a60f..c6d51e7093cf 100644
--- a/arch/powerpc/perf/power10-pmu.c
+++ b/arch/powerpc/perf/power10-pmu.c
@@ -91,8 +91,8 @@ extern u64 PERF_REG_EXTENDED_MASK;
/* Table of alternatives, sorted by column 0 */
static const unsigned int power10_event_alternatives[][MAX_ALT] = {
- { PM_CYC_ALT, PM_CYC },
{ PM_INST_CMPL_ALT, PM_INST_CMPL },
+ { PM_CYC_ALT, PM_CYC },
};
static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index c9eb5232e68b..c393e837648e 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -133,11 +133,11 @@ int p9_dd22_bl_ev[] = {
/* Table of alternatives, sorted by column 0 */
static const unsigned int power9_event_alternatives[][MAX_ALT] = {
- { PM_INST_DISP, PM_INST_DISP_ALT },
- { PM_RUN_CYC_ALT, PM_RUN_CYC },
- { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
- { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
{ PM_BR_2PATH, PM_BR_2PATH_ALT },
+ { PM_INST_DISP, PM_INST_DISP_ALT },
+ { PM_RUN_CYC_ALT, PM_RUN_CYC },
+ { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
+ { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
};
static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index 34592d00dde8..f6ef358d8a2c 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -38,7 +38,7 @@ config SOC_VIRT
select SIFIVE_PLIC
select PM_GENERIC_DOMAINS if PM
select PM_GENERIC_DOMAINS_OF if PM && OF
- select RISCV_SBI_CPUIDLE if CPU_IDLE
+ select RISCV_SBI_CPUIDLE if CPU_IDLE && RISCV_SBI
help
This enables support for QEMU Virt Machine.
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-fabric.dtsi b/arch/riscv/boot/dts/microchip/microchip-mpfs-fabric.dtsi
index 854320e17b28..ccaac3371cf9 100644
--- a/arch/riscv/boot/dts/microchip/microchip-mpfs-fabric.dtsi
+++ b/arch/riscv/boot/dts/microchip/microchip-mpfs-fabric.dtsi
@@ -7,7 +7,7 @@
reg = <0x0 0x41000000 0x0 0xF0>;
microchip,sync-update-mask = /bits/ 32 <0>;
#pwm-cells = <2>;
- clocks = <&clkcfg CLK_FIC3>;
+ clocks = <&fabric_clk3>;
status = "disabled";
};
@@ -16,10 +16,22 @@
reg = <0x0 0x44000000 0x0 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&clkcfg CLK_FIC3>;
+ clocks = <&fabric_clk3>;
interrupt-parent = <&plic>;
interrupts = <122>;
clock-frequency = <100000>;
status = "disabled";
};
+
+ fabric_clk3: fabric-clk3 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <62500000>;
+ };
+
+ fabric_clk1: fabric-clk1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ };
};
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
index cd2fe80fa81a..3392153dd0f1 100644
--- a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
+++ b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
@@ -45,7 +45,7 @@
};
&refclk {
- clock-frequency = <600000000>;
+ clock-frequency = <125000000>;
};
&mmuart1 {
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
index c5c9d1360de0..746c4d4e7686 100644
--- a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
+++ b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
@@ -141,7 +141,7 @@
};
};
- refclk: msspllclk {
+ refclk: mssrefclk {
compatible = "fixed-clock";
#clock-cells = <0>;
};
@@ -190,7 +190,7 @@
clkcfg: clkcfg@20002000 {
compatible = "microchip,mpfs-clkcfg";
- reg = <0x0 0x20002000 0x0 0x1000>;
+ reg = <0x0 0x20002000 0x0 0x1000>, <0x0 0x3E001000 0x0 0x1000>;
clocks = <&refclk>;
#clock-cells = <1>;
};
@@ -393,8 +393,8 @@
reg = <0x0 0x20124000 0x0 0x1000>;
interrupt-parent = <&plic>;
interrupts = <80>, <81>;
- clocks = <&clkcfg CLK_RTC>;
- clock-names = "rtc";
+ clocks = <&clkcfg CLK_RTC>, <&clkcfg CLK_RTCREF>;
+ clock-names = "rtc", "rtcref";
status = "disabled";
};
@@ -424,7 +424,7 @@
<0 0 0 3 &pcie_intc 2>,
<0 0 0 4 &pcie_intc 3>;
interrupt-map-mask = <0 0 0 7>;
- clocks = <&clkcfg CLK_FIC0>, <&clkcfg CLK_FIC1>, <&clkcfg CLK_FIC3>;
+ clocks = <&fabric_clk1>, <&fabric_clk1>, <&fabric_clk3>;
clock-names = "fic0", "fic1", "fic3";
ranges = <0x3000000 0x0 0x8000000 0x20 0x8000000 0x0 0x80000000>;
msi-parent = <&pcie>;
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 30e3017f22bc..0cc17db8aaba 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -101,6 +101,7 @@ CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_MMIO=y
CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_CTRL=y
CONFIG_RPMSG_VIRTIO=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index 7e5efdc3829d..6cd9d84d3e13 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -93,6 +93,7 @@ CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_MMIO=y
CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_CTRL=y
CONFIG_RPMSG_VIRTIO=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 78da839657e5..cd4bbcecb0fb 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -193,9 +193,6 @@ struct kvm_vcpu_arch {
/* Don't run the VCPU (blocked) */
bool pause;
-
- /* SRCU lock index for in-kernel run loop */
- int srcu_idx;
};
static inline void kvm_arch_hardware_unsetup(void) {}
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 0b552873a577..765004b60513 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -104,7 +104,7 @@ static int patch_text_cb(void *data)
struct patch_insn *patch = data;
int ret = 0;
- if (atomic_inc_return(&patch->cpu_count) == 1) {
+ if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
ret =
patch_text_nosync(patch->addr, &patch->insn,
GET_INSN_LENGTH(patch->insn));
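patch_text() runs this callback on every CPU via stop_machine(); counting up to num_online_cpus() elects the last CPU to arrive as the patcher, so all other CPUs are already parked in the callback before any instruction bytes change. A condensed sketch of the rendezvous, closely following the patched function (kernel APIs as used above):

static int example_patch_rendezvous(void *data)
{
	struct patch_insn *patch = data;

	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
		/* Last CPU in: everyone else is spinning, patch now. */
		patch_text_nosync(patch->addr, &patch->insn,
				  GET_INSN_LENGTH(patch->insn));
		atomic_inc(&patch->cpu_count);	/* release the spinners */
	} else {
		while (atomic_read(&patch->cpu_count) <= num_online_cpus())
			cpu_relax();
		smp_mb();
	}
	return 0;
}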
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 624166004e36..7461f964d20a 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -38,14 +38,16 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
sizeof(kvm_vcpu_stats_desc),
};
-#define KVM_RISCV_ISA_ALLOWED (riscv_isa_extension_mask(a) | \
- riscv_isa_extension_mask(c) | \
- riscv_isa_extension_mask(d) | \
- riscv_isa_extension_mask(f) | \
- riscv_isa_extension_mask(i) | \
- riscv_isa_extension_mask(m) | \
- riscv_isa_extension_mask(s) | \
- riscv_isa_extension_mask(u))
+#define KVM_RISCV_ISA_DISABLE_ALLOWED (riscv_isa_extension_mask(d) | \
+ riscv_isa_extension_mask(f))
+
+#define KVM_RISCV_ISA_DISABLE_NOT_ALLOWED (riscv_isa_extension_mask(a) | \
+ riscv_isa_extension_mask(c) | \
+ riscv_isa_extension_mask(i) | \
+ riscv_isa_extension_mask(m))
+
+#define KVM_RISCV_ISA_ALLOWED (KVM_RISCV_ISA_DISABLE_ALLOWED | \
+ KVM_RISCV_ISA_DISABLE_NOT_ALLOWED)
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
@@ -219,7 +221,8 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
switch (reg_num) {
case KVM_REG_RISCV_CONFIG_REG(isa):
if (!vcpu->arch.ran_atleast_once) {
- vcpu->arch.isa = reg_val;
+ /* Ignore the disable request for these extensions */
+ vcpu->arch.isa = reg_val | KVM_RISCV_ISA_DISABLE_NOT_ALLOWED;
vcpu->arch.isa &= riscv_isa_extension_base(NULL);
vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
kvm_riscv_vcpu_fp_reset(vcpu);
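Splitting the allow-list makes the policy explicit: F and D may be toggled off by userspace, while A, C, I and M are mandatory, so an attempt to disable them is overridden by OR-ing the mandatory mask back in before the usual AND filters. A self-contained demonstration in plain C (the EXT macro mirrors riscv_isa_extension_mask for single-letter extensions):

#include <stdio.h>

#define EXT(c) (1UL << ((c) - 'a'))

int main(void)
{
	unsigned long may_disable = EXT('d') | EXT('f');
	unsigned long mandatory = EXT('a') | EXT('c') | EXT('i') | EXT('m');
	unsigned long allowed = may_disable | mandatory;

	/* Userspace asks for "imac", i.e. it also tries to drop f and d. */
	unsigned long req = EXT('i') | EXT('m') | EXT('a') | EXT('c');

	/* Dropping f/d is honoured; dropping a/c/i/m would be ignored. */
	unsigned long isa = (req | mandatory) & allowed;

	printf("resulting ISA mask: %#lx\n", isa);
	return 0;
}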
@@ -653,8 +656,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->arch.isa);
kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
- csr_write(CSR_HGATP, 0);
-
csr->vsstatus = csr_read(CSR_VSSTATUS);
csr->vsie = csr_read(CSR_VSIE);
csr->vstvec = csr_read(CSR_VSTVEC);
@@ -726,13 +727,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
/* Mark this VCPU ran at least once */
vcpu->arch.ran_atleast_once = true;
- vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
/* Process MMIO value returned from user-space */
if (run->exit_reason == KVM_EXIT_MMIO) {
ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
if (ret) {
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
return ret;
}
}
@@ -741,13 +742,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_RISCV_SBI) {
ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
if (ret) {
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
return ret;
}
}
if (run->immediate_exit) {
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
return -EINTR;
}
@@ -786,7 +787,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
vcpu->mode = IN_GUEST_MODE;
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
smp_mb__after_srcu_read_unlock();
/*
@@ -804,7 +805,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
local_irq_enable();
preempt_enable();
- vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
continue;
}
@@ -848,7 +849,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
preempt_enable();
- vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
}
@@ -857,7 +858,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
vcpu_put(vcpu);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
return ret;
}
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index aa8af129e4bb..a72c15d4b42a 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -456,9 +456,9 @@ static int stage2_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
if (!kvm_arch_vcpu_runnable(vcpu)) {
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
kvm_vcpu_halt(vcpu);
- vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
}
}
diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c
index 4449a976e5a6..d4308c512007 100644
--- a/arch/riscv/kvm/vcpu_fp.c
+++ b/arch/riscv/kvm/vcpu_fp.c
@@ -11,6 +11,7 @@
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
+#include <asm/hwcap.h>
#ifdef CONFIG_FPU
void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index a09ecb97b890..d45e7da3f0d3 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -83,7 +83,7 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
struct kvm_run *run,
- u32 type, u64 flags)
+ u32 type, u64 reason)
{
unsigned long i;
struct kvm_vcpu *tmp;
@@ -94,7 +94,8 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
memset(&run->system_event, 0, sizeof(run->system_event));
run->system_event.type = type;
- run->system_event.flags = flags;
+ run->system_event.ndata = 1;
+ run->system_event.data[0] = reason;
run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 9535bea8688c..b0793dc0c291 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -718,6 +718,7 @@ retry:
if (!check_l4) {
disable_pgtable_l5();
check_l4 = true;
+ memset(early_pg_dir, 0, PAGE_SIZE);
goto retry;
}
disable_pgtable_l4();
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 77b5a03de13a..e084c72104f8 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -255,6 +255,10 @@ config HAVE_MARCH_Z15_FEATURES
def_bool n
select HAVE_MARCH_Z14_FEATURES
+config HAVE_MARCH_Z16_FEATURES
+ def_bool n
+ select HAVE_MARCH_Z15_FEATURES
+
choice
prompt "Processor type"
default MARCH_Z196
@@ -312,6 +316,14 @@ config MARCH_Z15
and 8561 series). The kernel will be slightly faster but will not
work on older machines.
+config MARCH_Z16
+ bool "IBM z16"
+ select HAVE_MARCH_Z16_FEATURES
+ depends on $(cc-option,-march=z16)
+ help
+ Select this to enable optimizations for IBM z16 (3931 and
+ 3932 series).
+
endchoice
config MARCH_Z10_TUNE
@@ -332,6 +344,9 @@ config MARCH_Z14_TUNE
config MARCH_Z15_TUNE
def_bool TUNE_Z15 || MARCH_Z15 && TUNE_DEFAULT
+config MARCH_Z16_TUNE
+ def_bool TUNE_Z16 || MARCH_Z16 && TUNE_DEFAULT
+
choice
prompt "Tune code generation"
default TUNE_DEFAULT
@@ -372,6 +387,10 @@ config TUNE_Z15
bool "IBM z15"
depends on $(cc-option,-mtune=z15)
+config TUNE_Z16
+ bool "IBM z16"
+ depends on $(cc-option,-mtune=z16)
+
endchoice
config 64BIT
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 7a65bca1e5af..e441b60b1812 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -42,6 +42,7 @@ mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12
mflags-$(CONFIG_MARCH_Z13) := -march=z13
mflags-$(CONFIG_MARCH_Z14) := -march=z14
mflags-$(CONFIG_MARCH_Z15) := -march=z15
+mflags-$(CONFIG_MARCH_Z16) := -march=z16
export CC_FLAGS_MARCH := $(mflags-y)
@@ -54,6 +55,7 @@ cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12
cflags-$(CONFIG_MARCH_Z13_TUNE) += -mtune=z13
cflags-$(CONFIG_MARCH_Z14_TUNE) += -mtune=z14
cflags-$(CONFIG_MARCH_Z15_TUNE) += -mtune=z15
+cflags-$(CONFIG_MARCH_Z16_TUNE) += -mtune=z16
cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 498bed9b261b..f6dfde577ce8 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -499,11 +499,13 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_CISCO is not set
# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DAVICOM is not set
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_ENGLEDER is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_FUNGIBLE is not set
# CONFIG_NET_VENDOR_GOOGLE is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -588,13 +590,13 @@ CONFIG_MLX5_INFINIBAND=m
CONFIG_SYNC_FILE=y
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
+CONFIG_MLX5_VFIO_PCI=m
CONFIG_VFIO_MDEV=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
-# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
@@ -690,6 +692,7 @@ CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
@@ -733,6 +736,7 @@ CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
@@ -786,7 +790,6 @@ CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_GDB_SCRIPTS=y
@@ -814,6 +817,7 @@ CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_DEBUG_PER_CPU_MAPS=y
CONFIG_KFENCE=y
+CONFIG_KFENCE_DEFERRABLE=y
CONFIG_KFENCE_STATIC_KEYS=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_PANIC_ON_OOPS=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 61e36b999f67..706df3a4a867 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -490,11 +490,13 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_CISCO is not set
# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DAVICOM is not set
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_ENGLEDER is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_FUNGIBLE is not set
# CONFIG_NET_VENDOR_GOOGLE is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -578,13 +580,13 @@ CONFIG_MLX5_INFINIBAND=m
CONFIG_SYNC_FILE=y
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
+CONFIG_MLX5_VFIO_PCI=m
CONFIG_VFIO_MDEV=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
-# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
@@ -720,6 +722,7 @@ CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
@@ -772,7 +775,6 @@ CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_GDB_SCRIPTS=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index c55c668dc3c7..a87fcc45e307 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -26,6 +26,7 @@ CONFIG_CRASH_DUMP=y
# CONFIG_S390_GUEST is not set
# CONFIG_SECCOMP is not set
# CONFIG_GCC_PLUGINS is not set
+# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_COMPACTION is not set
@@ -60,7 +61,6 @@ CONFIG_ZFCP=y
# CONFIG_HID is not set
# CONFIG_VIRTIO_MENU is not set
# CONFIG_VHOST_MENU is not set
-# CONFIG_SURFACE_PLATFORMS is not set
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
@@ -71,10 +71,10 @@ CONFIG_LSM="yama,loadpin,safesetid,integrity"
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_PRINTK_TIME=y
# CONFIG_SYMBOLIC_ERRNAME is not set
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
index eabab24b71dd..2f0a1cacdf85 100644
--- a/arch/s390/include/asm/entry-common.h
+++ b/arch/s390/include/asm/entry-common.h
@@ -58,7 +58,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
static inline bool on_thread_stack(void)
{
- return !(((unsigned long)(current->stack) ^ current_stack_pointer()) & ~(THREAD_SIZE - 1));
+ return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
}
#endif
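The test works because each kernel stack is a THREAD_SIZE-aligned block: two addresses lie in the same block exactly when their XOR has no bits set at or above log2(THREAD_SIZE). A tiny plain-C demonstration (THREAD_SIZE value invented for the example):

#include <stdio.h>

#define THREAD_SIZE 0x4000UL	/* invented: 16 KiB aligned blocks */

static int same_block(unsigned long base, unsigned long sp)
{
	return !((base ^ sp) & ~(THREAD_SIZE - 1));
}

int main(void)
{
	printf("%d\n", same_block(0x8000, 0x8fff));	/* 1: same block */
	printf("%d\n", same_block(0x8000, 0xc000));	/* 0: next block */
	return 0;
}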
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index eee8d96fb38e..ff1e25d515a8 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -200,13 +200,7 @@ unsigned long __get_wchan(struct task_struct *p);
/* Has task runtime instrumentation enabled ? */
#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
-static __always_inline unsigned long current_stack_pointer(void)
-{
- unsigned long sp;
-
- asm volatile("la %0,0(15)" : "=a" (sp));
- return sp;
-}
+register unsigned long current_stack_pointer asm("r15");
static __always_inline unsigned short stap(void)
{
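Replacing the inline-asm accessor with a global register variable lets every use of current_stack_pointer read %r15 directly, the same GNU C extension x86 and arm64 already rely on. A minimal userspace sketch of the construct (the x86-64 register is named here purely for the illustration; builds with GCC):

#include <stdio.h>

/* GNU C global register variable: each read compiles to a direct
 * use of the named register. */
register unsigned long stack_pointer asm("rsp");

int main(void)
{
	printf("approximate stack pointer: %#lx\n", stack_pointer);
	return 0;
}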
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 275f4258fbd5..f8500191993d 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -46,7 +46,7 @@ struct stack_frame {
};
/*
- * Unlike current_stack_pointer() which simply returns current value of %r15
+ * Unlike current_stack_pointer which simply contains the current value of %r15
* current_frame_address() returns function stack frame address, which matches
* %r15 upon function invocation. It may differ from %r15 later if function
* allocates stack for local variables or new stack frame to call other
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index b2ef014a9287..6ebf02e15c85 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -54,7 +54,7 @@ static void __do_machine_kdump(void *image)
* This needs to be done *after* s390_reset_system has set the
* prefix register of this CPU to zero
*/
- memcpy((void *) __LC_FPREGS_SAVE_AREA,
+ memcpy(absolute_pointer(__LC_FPREGS_SAVE_AREA),
(void *)(prefix + __LC_FPREGS_SAVE_AREA), 512);
__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 7a74ea5f7531..aa0e0e7fc773 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -283,6 +283,10 @@ static int __init setup_elf_platform(void)
case 0x8562:
strcpy(elf_platform, "z15");
break;
+ case 0x3931:
+ case 0x3932:
+ strcpy(elf_platform, "z16");
+ break;
}
return 0;
}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 9b30beac904d..af96dc0549a4 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1334,11 +1334,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
kvm_vcpu_halt(vcpu);
vcpu->valid_wakeup = false;
__unset_cpu_idle(vcpu);
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
hrtimer_cancel(&vcpu->arch.ckc_timer);
return 0;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 156d1c25a3c1..da3dabda1a12 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4237,14 +4237,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
* We try to hold kvm->srcu during most of vcpu_run (except when run-
* ning the guest), so that memslots (and other stuff) are protected
*/
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
do {
rc = vcpu_pre_run(vcpu);
if (rc)
break;
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
/*
* As PF_VCPU will be used in fault handler, between
* guest_enter and guest_exit there should be no uaccess.
@@ -4281,12 +4281,12 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
__enable_cpu_timer_accounting(vcpu);
guest_exit_irqoff();
local_irq_enable();
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
rc = vcpu_post_run(vcpu, exit_reason);
} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
return rc;
}
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index 7f7c0d6af2ce..cc7c9599f43e 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -137,12 +137,7 @@ static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
/* Allocate variable storage */
vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
vlen += uv_info.guest_virt_base_stor_len;
- /*
- * The Create Secure Configuration Ultravisor Call does not support
- * using large pages for the virtual memory area.
- * This is a hardware limitation.
- */
- kvm->arch.pv.stor_var = vmalloc_no_huge(vlen);
+ kvm->arch.pv.stor_var = vzalloc(vlen);
if (!kvm->arch.pv.stor_var)
goto out_err;
return 0;
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index acda4b6fc851..dada78b92691 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -1091,7 +1091,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
handle_last_fault(vcpu, vsie_page);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
/* save current guest state of bp isolation override */
guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
@@ -1133,7 +1133,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (!guest_bp_isolation)
clear_thread_flag(TIF_ISOLATE_BP_GUEST);
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
if (rc == -EINTR) {
VCPU_EVENT(vcpu, 3, "%s", "machine check");
diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c
index 9bb067321ab4..5a053b393d5c 100644
--- a/arch/s390/lib/test_unwind.c
+++ b/arch/s390/lib/test_unwind.c
@@ -147,7 +147,7 @@ static __always_inline struct pt_regs fake_pt_regs(void)
struct pt_regs regs;
memset(&regs, 0, sizeof(regs));
- regs.gprs[15] = current_stack_pointer();
+ regs.gprs[15] = current_stack_pointer;
asm volatile(
"basr %[psw_addr],0\n"
diff --git a/arch/sparc/include/asm/cacheflush_32.h b/arch/sparc/include/asm/cacheflush_32.h
index 41c6d734a474..adb6991d0455 100644
--- a/arch/sparc/include/asm/cacheflush_32.h
+++ b/arch/sparc/include/asm/cacheflush_32.h
@@ -35,6 +35,7 @@
#define flush_page_for_dma(addr) \
sparc32_cachetlb_ops->page_for_dma(addr)
+struct page;
void sparc_flush_page_to_ram(struct page *page);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 374d3e85c1f1..675416deb298 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1872,7 +1872,7 @@ config X86_KERNEL_IBT
code with them to make this happen.
In addition to building the kernel with IBT, seal all functions that
- are not indirect call targets, avoiding them ever becomming one.
+ are not indirect call targets, avoiding them ever becoming one.
This requires LTO-like objtool runs and will slow down the build. It
does significantly reduce the number of ENDBR instructions in the
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 4faac48ebec5..73d958522b6a 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -337,6 +337,9 @@ SYM_CODE_END(ret_from_fork)
call \cfunc
+ /* For some configurations \cfunc ends up being a noreturn. */
+ REACHABLE
+
jmp error_return
.endm
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 5d7762288a24..48e5db21142c 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -51,7 +51,7 @@
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
* SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- * TGL,TNT,RKL,ADL,RPL
+ * TGL,TNT,RKL,ADL,RPL,SPR
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
@@ -62,7 +62,7 @@
* perf code: 0x00
* Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
* KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
- * RPL
+ * RPL,SPR
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
@@ -74,7 +74,7 @@
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
* SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- * TGL,TNT,RKL,ADL,RPL
+ * TGL,TNT,RKL,ADL,RPL,SPR
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
@@ -675,6 +675,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_cstates),
+ X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &icx_cstates),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &icl_cstates),
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 7516e4199b3c..20fd0acd7d80 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -28,15 +28,13 @@ typedef u16 compat_ipc_pid_t;
typedef __kernel_fsid_t compat_fsid_t;
struct compat_stat {
- compat_dev_t st_dev;
- u16 __pad1;
+ u32 st_dev;
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_nlink_t st_nlink;
__compat_uid_t st_uid;
__compat_gid_t st_gid;
- compat_dev_t st_rdev;
- u16 __pad2;
+ u32 st_rdev;
u32 st_size;
u32 st_blksize;
u32 st_blocks;
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 048b6d5aff50..def6ca121111 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -26,6 +26,7 @@
* _G - parts with extra graphics on
* _X - regular server parts
* _D - micro server parts
+ * _N,_P - other mobile parts
*
* Historical OPTDIFFs:
*
@@ -107,8 +108,10 @@
#define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */
#define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */
+#define INTEL_FAM6_ALDERLAKE_N 0xBE
#define INTEL_FAM6_RAPTORLAKE 0xB7
+#define INTEL_FAM6_RAPTORLAKE_P 0xBA
/* "Small Core" Processors (Atom) */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index f6d91ecb8026..e9736af126b2 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -210,8 +210,6 @@ void __iomem *ioremap(resource_size_t offset, unsigned long size);
extern void iounmap(volatile void __iomem *addr);
#define iounmap iounmap
-extern void set_iounmap_nonlazy(void);
-
#ifdef __KERNEL__
void memcpy_fromio(void *, const volatile void __iomem *, size_t);
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 3c368b639c04..1a6d7e3f6c32 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -118,6 +118,7 @@ KVM_X86_OP_OPTIONAL(mem_enc_register_region)
KVM_X86_OP_OPTIONAL(mem_enc_unregister_region)
KVM_X86_OP_OPTIONAL(vm_copy_enc_context_from)
KVM_X86_OP_OPTIONAL(vm_move_enc_context_from)
+KVM_X86_OP_OPTIONAL(guest_memory_reclaimed)
KVM_X86_OP(get_msr_feature)
KVM_X86_OP(can_emulate_instruction)
KVM_X86_OP(apic_init_signal_blocked)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d23e80a56eb8..4ff36610af6a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -974,12 +974,10 @@ enum hv_tsc_page_status {
HV_TSC_PAGE_UNSET = 0,
/* TSC page MSR was written by the guest, update pending */
HV_TSC_PAGE_GUEST_CHANGED,
- /* TSC page MSR was written by KVM userspace, update pending */
+ /* TSC page update was triggered from the host side */
HV_TSC_PAGE_HOST_CHANGED,
/* TSC page was properly set up and is currently active */
HV_TSC_PAGE_SET,
- /* TSC page is currently being updated and therefore is inactive */
- HV_TSC_PAGE_UPDATING,
/* TSC page was set up with an inaccessible GPA */
HV_TSC_PAGE_BROKEN,
};
@@ -1052,6 +1050,7 @@ enum kvm_apicv_inhibit {
APICV_INHIBIT_REASON_X2APIC,
APICV_INHIBIT_REASON_BLOCKIRQ,
APICV_INHIBIT_REASON_ABSENT,
+ APICV_INHIBIT_REASON_SEV,
};
struct kvm_arch {
@@ -1485,6 +1484,7 @@ struct kvm_x86_ops {
int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp);
int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
+ void (*guest_memory_reclaimed)(struct kvm *kvm);
int (*get_msr_feature)(struct kvm_msr_entry *entry);
@@ -1585,8 +1585,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
#define kvm_arch_pmi_in_guest(vcpu) \
((vcpu) && (vcpu)->arch.handling_intr_from_guest)
-int kvm_mmu_module_init(void);
-void kvm_mmu_module_exit(void);
+void kvm_mmu_x86_module_init(void);
+int kvm_mmu_vendor_module_init(void);
+void kvm_mmu_vendor_module_exit(void);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index d6bfdfb0f0af..0c3d3440fe27 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -131,10 +131,12 @@ extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
void reload_early_microcode(void);
extern bool initrd_gone;
+void microcode_bsp_resume(void);
#else
static inline void __init load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
static inline void reload_early_microcode(void) { }
+static inline void microcode_bsp_resume(void) { }
#endif
#endif /* _ASM_X86_MICROCODE_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 0eb90d21049e..ee15311b6be1 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -128,9 +128,9 @@
#define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */
#define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */
-/* SRBDS support */
#define MSR_IA32_MCU_OPT_CTRL 0x00000123
-#define RNGDS_MITG_DIS BIT(0)
+#define RNGDS_MITG_DIS BIT(0) /* SRBDS support */
+#define RTM_ALLOW BIT(1) /* TSX development mode */
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 40497a9020c6..407084d9fd99 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -559,10 +559,6 @@ static inline void update_page_count(int level, unsigned long pages) { }
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
unsigned int *level);
-
-struct mm_struct;
-extern pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address,
- unsigned int *level);
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h
index 2455d721503e..2d8dacd02643 100644
--- a/arch/x86/include/asm/static_call.h
+++ b/arch/x86/include/asm/static_call.h
@@ -26,6 +26,7 @@
".align 4 \n" \
".globl " STATIC_CALL_TRAMP_STR(name) " \n" \
STATIC_CALL_TRAMP_STR(name) ": \n" \
+ ANNOTATE_NOENDBR \
insns " \n" \
".byte 0x53, 0x43, 0x54 \n" \
".type " STATIC_CALL_TRAMP_STR(name) ", @function \n" \
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index ed4417500700..e342ae4db3c4 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1855,6 +1855,8 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
validate_apic_and_package_id(c);
x86_spec_ctrl_setup_ap();
update_srbds_msr();
+
+ tsx_ap_init();
}
static __init int setup_noclflush(char *arg)
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index ee6f23f7587d..2a8e584fc991 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -55,11 +55,10 @@ enum tsx_ctrl_states {
extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
extern void __init tsx_init(void);
-extern void tsx_enable(void);
-extern void tsx_disable(void);
-extern void tsx_clear_cpuid(void);
+void tsx_ap_init(void);
#else
static inline void tsx_init(void) { }
+static inline void tsx_ap_init(void) { }
#endif /* CONFIG_CPU_SUP_INTEL */
extern void get_cpu_cap(struct cpuinfo_x86 *c);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8321c43554a1..f7a5370a9b3b 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -717,13 +717,6 @@ static void init_intel(struct cpuinfo_x86 *c)
init_intel_misc_features(c);
- if (tsx_ctrl_state == TSX_CTRL_ENABLE)
- tsx_enable();
- else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
- tsx_disable();
- else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
- tsx_clear_cpuid();
-
split_lock_init();
bus_lock_init();
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index f955d25076ba..239ff5fcec6a 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -758,9 +758,9 @@ static struct subsys_interface mc_cpu_interface = {
};
/**
- * mc_bp_resume - Update boot CPU microcode during resume.
+ * microcode_bsp_resume - Update boot CPU microcode during resume.
*/
-static void mc_bp_resume(void)
+void microcode_bsp_resume(void)
{
int cpu = smp_processor_id();
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
@@ -772,7 +772,7 @@ static void mc_bp_resume(void)
}
static struct syscore_ops mc_syscore_ops = {
- .resume = mc_bp_resume,
+ .resume = microcode_bsp_resume,
};
static int mc_cpu_starting(unsigned int cpu)
diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c
index 9c7a5f049292..ec7bbac3a9f2 100644
--- a/arch/x86/kernel/cpu/tsx.c
+++ b/arch/x86/kernel/cpu/tsx.c
@@ -19,7 +19,7 @@
enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED;
-void tsx_disable(void)
+static void tsx_disable(void)
{
u64 tsx;
@@ -39,7 +39,7 @@ void tsx_disable(void)
wrmsrl(MSR_IA32_TSX_CTRL, tsx);
}
-void tsx_enable(void)
+static void tsx_enable(void)
{
u64 tsx;
@@ -58,7 +58,7 @@ void tsx_enable(void)
wrmsrl(MSR_IA32_TSX_CTRL, tsx);
}
-static bool __init tsx_ctrl_is_supported(void)
+static bool tsx_ctrl_is_supported(void)
{
u64 ia32_cap = x86_read_arch_cap_msr();
@@ -84,7 +84,45 @@ static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
return TSX_CTRL_ENABLE;
}
-void tsx_clear_cpuid(void)
+/*
+ * Disabling TSX is not a trivial business.
+ *
+ * First of all, there's a CPUID bit: X86_FEATURE_RTM_ALWAYS_ABORT
+ * which says that TSX is practically disabled (all transactions are
+ * aborted by default). When that bit is set, the kernel unconditionally
+ * disables TSX.
+ *
+ * In order to do that, however, it needs to dance a bit:
+ *
+ * 1. The first method to disable it is through MSR_TSX_FORCE_ABORT and
+ * the MSR is present only when *two* CPUID bits are set:
+ *
+ * - X86_FEATURE_RTM_ALWAYS_ABORT
+ * - X86_FEATURE_TSX_FORCE_ABORT
+ *
+ * 2. The second method is for CPUs which do not have the above-mentioned
+ * MSR: those use a different MSR - MSR_IA32_TSX_CTRL and disable TSX
+ * through that one. Those CPUs can also have the initially mentioned
+ * CPUID bit X86_FEATURE_RTM_ALWAYS_ABORT set and for those the same strategy
+ * applies: TSX gets disabled unconditionally.
+ *
+ * When either of the two methods are present, the kernel disables TSX and
+ * clears the respective RTM and HLE feature flags.
+ *
+ * An additional twist comes from late microcode loading, which can
+ * cause the X86_FEATURE_RTM_ALWAYS_ABORT CPUID bit to become set after
+ * the update.
+ *
+ * A subsequent hotplug operation on any logical CPU except the BSP will
+ * cause the supported CPUID feature bits to be re-detected. If RTM and
+ * HLE suddenly get cleared, but userspace consulted them before the
+ * update, funny explosions will happen. Long story short: the kernel
+ * doesn't modify CPUID feature bits after booting.
+ *
+ * That's why this function's call in init_intel() doesn't clear the
+ * feature flags.
+ */
+static void tsx_clear_cpuid(void)
{
u64 msr;
@@ -97,6 +135,39 @@ void tsx_clear_cpuid(void)
rdmsrl(MSR_TSX_FORCE_ABORT, msr);
msr |= MSR_TFA_TSX_CPUID_CLEAR;
wrmsrl(MSR_TSX_FORCE_ABORT, msr);
+ } else if (tsx_ctrl_is_supported()) {
+ rdmsrl(MSR_IA32_TSX_CTRL, msr);
+ msr |= TSX_CTRL_CPUID_CLEAR;
+ wrmsrl(MSR_IA32_TSX_CTRL, msr);
+ }
+}
+
+/*
+ * Disable TSX development mode
+ *
+ * When the microcode released in Feb 2022 is applied, TSX will be disabled by
+ * default on some processors. MSR 0x122 (TSX_CTRL) and MSR 0x123
+ * (IA32_MCU_OPT_CTRL) can be used to re-enable TSX for development, but doing
+ * so is not recommended for production deployments. In particular, applying MD_CLEAR
+ * flows for mitigation of the Intel TSX Asynchronous Abort (TAA) transient
+ * execution attack may not be effective on these processors when Intel TSX is
+ * enabled with updated microcode.
+ */
+static void tsx_dev_mode_disable(void)
+{
+ u64 mcu_opt_ctrl;
+
+ /* Check if RTM_ALLOW exists */
+ if (!boot_cpu_has_bug(X86_BUG_TAA) || !tsx_ctrl_is_supported() ||
+ !cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL))
+ return;
+
+ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
+
+ if (mcu_opt_ctrl & RTM_ALLOW) {
+ mcu_opt_ctrl &= ~RTM_ALLOW;
+ wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
+ setup_force_cpu_cap(X86_FEATURE_RTM_ALWAYS_ABORT);
}
}
@@ -105,14 +176,14 @@ void __init tsx_init(void)
char arg[5] = {};
int ret;
+ tsx_dev_mode_disable();
+
/*
- * Hardware will always abort a TSX transaction if both CPUID bits
- * RTM_ALWAYS_ABORT and TSX_FORCE_ABORT are set. In this case, it is
- * better not to enumerate CPUID.RTM and CPUID.HLE bits. Clear them
- * here.
+ * Hardware will always abort a TSX transaction when the CPUID bit
+ * RTM_ALWAYS_ABORT is set. In this case, it is better not to enumerate
+ * CPUID.RTM and CPUID.HLE bits. Clear them here.
*/
- if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
- boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+ if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
tsx_ctrl_state = TSX_CTRL_RTM_ALWAYS_ABORT;
tsx_clear_cpuid();
setup_clear_cpu_cap(X86_FEATURE_RTM);
@@ -175,3 +246,16 @@ void __init tsx_init(void)
setup_force_cpu_cap(X86_FEATURE_HLE);
}
}
+
+void tsx_ap_init(void)
+{
+ tsx_dev_mode_disable();
+
+ if (tsx_ctrl_state == TSX_CTRL_ENABLE)
+ tsx_enable();
+ else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
+ tsx_disable();
+ else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
+ /* See the comment above that function for more details. */
+ tsx_clear_cpuid();
+}
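
For readers following the long TSX comment block above: both disable methods reduce to a read-modify-write of one MSR. A hedged sketch, reusing the MSR and bit names from this patch (the helper context, e.g. tsx_ctrl_is_supported(), is taken from the hunks above):

	static void tsx_clear_cpuid_sketch(void)
	{
		u64 msr;

		if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
		    boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
			/* Method 1: MSR_TSX_FORCE_ABORT needs both CPUID bits. */
			rdmsrl(MSR_TSX_FORCE_ABORT, msr);
			wrmsrl(MSR_TSX_FORCE_ABORT, msr | MSR_TFA_TSX_CPUID_CLEAR);
		} else if (tsx_ctrl_is_supported()) {
			/* Method 2: MSR_IA32_TSX_CTRL on CPUs without the above. */
			rdmsrl(MSR_IA32_TSX_CTRL, msr);
			wrmsrl(MSR_IA32_TSX_CTRL, msr | TSX_CTRL_CPUID_CLEAR);
		}
	}

Note the ordering in tsx_init()/tsx_ap_init(): tsx_dev_mode_disable() runs first, so a development-mode RTM_ALLOW setting cannot mask either method.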
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index a7f617a3981d..97529552dd24 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -37,7 +37,6 @@ static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
} else
memcpy(buf, vaddr + offset, csize);
- set_iounmap_nonlazy();
iounmap((void __iomem *)vaddr);
return csize;
}
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index a22deb58f86d..8b1c45c9cda8 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -69,6 +69,7 @@ static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __align
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;
+static int has_guest_poll = 0;
/*
* No need for any "IO delay" on KVM
*/
@@ -706,14 +707,26 @@ static int kvm_cpu_down_prepare(unsigned int cpu)
static int kvm_suspend(void)
{
+ u64 val = 0;
+
kvm_guest_cpu_offline(false);
+#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
+ if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
+ rdmsrl(MSR_KVM_POLL_CONTROL, val);
+ has_guest_poll = !(val & 1);
+#endif
return 0;
}
static void kvm_resume(void)
{
kvm_cpu_online(raw_smp_processor_id());
+
+#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
+ if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll)
+ wrmsrl(MSR_KVM_POLL_CONTROL, 0);
+#endif
}
static struct syscore_ops kvm_syscore_ops = {
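
The suspend/resume hunk above hinges on bit 0 of MSR_KVM_POLL_CONTROL: the haltpoll guest driver clears it to tell the host that the guest will do its own halt polling, and firmware-mediated suspend can silently reset it. A hedged sketch of the save/restore pairing (the bit-0 meaning is inferred from the code above, and the kvm_para_has_feature() guard is elided):

	static u64 saved_poll_control;

	static int poll_control_suspend_sketch(void)
	{
		rdmsrl(MSR_KVM_POLL_CONTROL, saved_poll_control);
		return 0;
	}

	static void poll_control_resume_sketch(void)
	{
		/* Re-assert guest-side polling if it was active before suspend. */
		if (!(saved_poll_control & 1))
			wrmsrl(MSR_KVM_POLL_CONTROL, 0);
	}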
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index 794fdef2501a..38185aedf7d1 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -339,11 +339,11 @@ static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
struct stack_info *info = &state->stack_info;
void *addr = (void *)_addr;
- if (!on_stack(info, addr, len) &&
- (get_stack_info(addr, state->task, info, &state->stack_mask)))
- return false;
+ if (on_stack(info, addr, len))
+ return true;
- return true;
+ return !get_stack_info(addr, state->task, info, &state->stack_mask) &&
+ on_stack(info, addr, len);
}
static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index b24ca7f4ed7c..598334ed5fbc 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -1085,12 +1085,21 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
case 0x80000000:
entry->eax = min(entry->eax, 0x80000021);
/*
- * Serializing LFENCE is reported in a multitude of ways,
- * and NullSegClearsBase is not reported in CPUID on Zen2;
- * help userspace by providing the CPUID leaf ourselves.
+ * Serializing LFENCE is reported in a multitude of ways, and
+ * NullSegClearsBase is not reported in CPUID on Zen2; help
+ * userspace by providing the CPUID leaf ourselves.
+ *
+ * However, only do it if the host has CPUID leaf 0x8000001d.
+ * QEMU thinks that it can query the host blindly for that
+ * CPUID leaf if KVM reports that it supports 0x8000001d or
+ * above. The processor merrily returns values from the
+ * highest Intel leaf which QEMU tries to use as the guest's
+ * 0x8000001d. Even worse, this can result in an infinite
+ * loop if said highest leaf has no subleaves indexed by ECX.
*/
- if (static_cpu_has(X86_FEATURE_LFENCE_RDTSC)
- || !static_cpu_has_bug(X86_BUG_NULL_SEG))
+ if (entry->eax >= 0x8000001d &&
+ (static_cpu_has(X86_FEATURE_LFENCE_RDTSC)
+ || !static_cpu_has_bug(X86_BUG_NULL_SEG)))
entry->eax = max(entry->eax, 0x80000021);
break;
case 0x80000001:
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 123b677111c5..46f9dfb60469 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1135,11 +1135,13 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
+ mutex_lock(&hv->hv_lock);
+
if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+ hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
- return;
+ goto out_unlock;
- mutex_lock(&hv->hv_lock);
if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
goto out_unlock;
@@ -1201,45 +1203,19 @@ out_unlock:
mutex_unlock(&hv->hv_lock);
}
-void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
+void kvm_hv_request_tsc_page_update(struct kvm *kvm)
{
struct kvm_hv *hv = to_kvm_hv(kvm);
- u64 gfn;
- int idx;
-
- if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
- hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
- tsc_page_update_unsafe(hv))
- return;
mutex_lock(&hv->hv_lock);
- if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
- goto out_unlock;
-
- /* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
- if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
- hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
+ if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
+ !tsc_page_update_unsafe(hv))
+ hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
- gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
-
- hv->tsc_ref.tsc_sequence = 0;
-
- /*
- * Take the srcu lock as memslots will be accessed to check the gfn
- * cache generation against the memslots generation.
- */
- idx = srcu_read_lock(&kvm->srcu);
- if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
- &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
- hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
- srcu_read_unlock(&kvm->srcu, idx);
-
-out_unlock:
mutex_unlock(&hv->hv_lock);
}
-
static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
{
if (!hv_vcpu->enforce_cpuid)
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index e19c00ee9ab3..da2737f2a956 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -137,7 +137,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);
void kvm_hv_setup_tsc_page(struct kvm *kvm,
struct pvclock_vcpu_time_info *hv_clock);
-void kvm_hv_invalidate_tsc_page(struct kvm *kvm);
+void kvm_hv_request_tsc_page_update(struct kvm *kvm);
void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index e6cae6f22683..a335e7f1f69e 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -65,6 +65,30 @@ static __always_inline u64 rsvd_bits(int s, int e)
return ((2ULL << (e - s)) - 1) << s;
}
+/*
+ * The number of non-reserved physical address bits irrespective of features
+ * that repurpose legal bits, e.g. MKTME.
+ */
+extern u8 __read_mostly shadow_phys_bits;
+
+static inline gfn_t kvm_mmu_max_gfn(void)
+{
+ /*
+ * Note that this uses the host MAXPHYADDR, not the guest's.
+ * EPT/NPT cannot support GPAs that would exceed host.MAXPHYADDR;
+ * assuming KVM is running on bare metal, guest accesses beyond
+ * host.MAXPHYADDR will hit a #PF(RSVD) and never cause a vmexit
+ * (either EPT Violation/Misconfig or #NPF), and so KVM will never
+ * install a SPTE for such addresses. If KVM is running as a VM
+ * itself, on the other hand, it might see a MAXPHYADDR that is less
+ * than hardware's real MAXPHYADDR. Using the host MAXPHYADDR
+ * disallows such SPTEs entirely and simplifies the TDP MMU.
+ */
+ int max_gpa_bits = likely(tdp_enabled) ? shadow_phys_bits : 52;
+
+ return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;
+}
+
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);
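
As a worked example of kvm_mmu_max_gfn() (the numbers are illustrative, not from the patch): with tdp_enabled and a host MAXPHYADDR of 46 bits, shadow_phys_bits = 46 and PAGE_SHIFT = 12, so:

	/* 2^(46-12) page frames in a 64 TiB GPA space. */
	gfn_t max_gfn = (1ULL << (46 - 12)) - 1;	/* 0x3ffffffff */

Without TDP the cap falls back to the architectural 52 bits, giving (1ULL << 40) - 1.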
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 8f19ea752704..64a2a7e2be90 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2804,8 +2804,12 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
const struct kvm_memory_slot *slot)
{
unsigned long hva;
- pte_t *pte;
- int level;
+ unsigned long flags;
+ int level = PG_LEVEL_4K;
+ pgd_t pgd;
+ p4d_t p4d;
+ pud_t pud;
+ pmd_t pmd;
if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
return PG_LEVEL_4K;
@@ -2820,10 +2824,43 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
*/
hva = __gfn_to_hva_memslot(slot, gfn);
- pte = lookup_address_in_mm(kvm->mm, hva, &level);
- if (unlikely(!pte))
- return PG_LEVEL_4K;
+ /*
+ * Lookup the mapping level in the current mm. The information
+ * may become stale soon, but it is safe to use as long as
+ * 1) mmu_notifier_retry was checked after taking mmu_lock, and
+ * 2) mmu_lock is taken now.
+ *
+ * We still need to disable IRQs to prevent concurrent tear down
+ * of page tables.
+ */
+ local_irq_save(flags);
+
+ pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
+ if (pgd_none(pgd))
+ goto out;
+
+ p4d = READ_ONCE(*p4d_offset(&pgd, hva));
+ if (p4d_none(p4d) || !p4d_present(p4d))
+ goto out;
+
+ pud = READ_ONCE(*pud_offset(&p4d, hva));
+ if (pud_none(pud) || !pud_present(pud))
+ goto out;
+
+ if (pud_large(pud)) {
+ level = PG_LEVEL_1G;
+ goto out;
+ }
+ pmd = READ_ONCE(*pmd_offset(&pud, hva));
+ if (pmd_none(pmd) || !pmd_present(pmd))
+ goto out;
+
+ if (pmd_large(pmd))
+ level = PG_LEVEL_2M;
+
+out:
+ local_irq_restore(flags);
return level;
}
@@ -2992,9 +3029,15 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fa
/*
* If MMIO caching is disabled, emulate immediately without
* touching the shadow page tables as attempting to install an
- * MMIO SPTE will just be an expensive nop.
+ * MMIO SPTE will just be an expensive nop. Do not cache MMIO
+ * whose gfn is greater than host.MAXPHYADDR, any guest that
+ * generates such gfns is running nested and is being tricked
+ * by L0 userspace (you can observe gfn > L1.MAXPHYADDR if
+ * and only if L1's MAXPHYADDR is inaccurate with respect to
+ * the hardware's).
*/
- if (unlikely(!shadow_mmio_value)) {
+ if (unlikely(!shadow_mmio_value) ||
+ unlikely(fault->gfn > kvm_mmu_max_gfn())) {
*ret_val = RET_PF_EMULATE;
return true;
}
@@ -6237,12 +6280,24 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
return 0;
}
-int kvm_mmu_module_init(void)
+/*
+ * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
+ * its default value of -1 is technically undefined behavior for a boolean.
+ */
+void kvm_mmu_x86_module_init(void)
{
- int ret = -ENOMEM;
-
if (nx_huge_pages == -1)
__set_nx_huge_pages(get_nx_auto_mode());
+}
+
+/*
+ * The bulk of the MMU initialization is deferred until the vendor module is
+ * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
+ * to be reset when a potentially different vendor module is loaded.
+ */
+int kvm_mmu_vendor_module_init(void)
+{
+ int ret = -ENOMEM;
/*
* MMU roles use union aliasing which is, generally speaking, an
@@ -6290,7 +6345,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
mmu_free_memory_caches(vcpu);
}
-void kvm_mmu_module_exit(void)
+void kvm_mmu_vendor_module_exit(void)
{
mmu_destroy_caches();
percpu_counter_destroy(&kvm_total_used_mmu_pages);
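
The host_pfn_mapping_level() rewrite above leans on a classic lockless-walk guarantee: page-table pages are freed only after a TLB shootdown, and the IPIs that implement it are blocked while local interrupts are off, so an IRQ-off section pins the tables for the duration of the walk. A condensed restatement of the pattern as a standalone helper (a sketch of the hunk above, not a drop-in replacement):

	static int mapping_level_sketch(struct mm_struct *mm, unsigned long hva)
	{
		unsigned long flags;
		int level = PG_LEVEL_4K;
		pgd_t pgd;
		p4d_t p4d;
		pud_t pud;
		pmd_t pmd;

		local_irq_save(flags);			/* pin the page tables */
		pgd = READ_ONCE(*pgd_offset(mm, hva));	/* tear-free snapshots */
		if (pgd_none(pgd))
			goto out;
		p4d = READ_ONCE(*p4d_offset(&pgd, hva));
		if (!p4d_present(p4d))
			goto out;
		pud = READ_ONCE(*pud_offset(&p4d, hva));
		if (!pud_present(pud))
			goto out;
		if (pud_large(pud)) {
			level = PG_LEVEL_1G;
			goto out;
		}
		pmd = READ_ONCE(*pmd_offset(&pud, hva));
		if (pmd_present(pmd) && pmd_large(pmd))
			level = PG_LEVEL_2M;
	out:
		local_irq_restore(flags);
		return level;
	}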
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 73f12615416f..e4abeb5df1b1 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -201,12 +201,6 @@ static inline bool is_removed_spte(u64 spte)
*/
extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
-/*
- * The number of non-reserved physical address bits irrespective of features
- * that repurpose legal bits, e.g. MKTME.
- */
-extern u8 __read_mostly shadow_phys_bits;
-
static inline bool is_mmio_spte(u64 spte)
{
return (spte & shadow_mmio_mask) == shadow_mmio_value &&
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index d71d177ae6b8..edc68538819b 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -51,7 +51,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
if (!kvm->arch.tdp_mmu_enabled)
return;
- flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
+ /* Also waits for any queued work items. */
destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
@@ -815,14 +815,15 @@ static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
return iter->yielded;
}
-static inline gfn_t tdp_mmu_max_gfn_host(void)
+static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
{
/*
- * Bound TDP MMU walks at host.MAXPHYADDR, guest accesses beyond that
- * will hit a #PF(RSVD) and never hit an EPT Violation/Misconfig / #NPF,
- * and so KVM will never install a SPTE for such addresses.
+ * Bound TDP MMU walks at host.MAXPHYADDR. KVM disallows memslots with
+ * a gpa range that would exceed the max gfn, and KVM does not create
+ * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
+ * the slow emulation path every time.
*/
- return 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+ return kvm_mmu_max_gfn() + 1;
}
static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
@@ -830,7 +831,7 @@ static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
{
struct tdp_iter iter;
- gfn_t end = tdp_mmu_max_gfn_host();
+ gfn_t end = tdp_mmu_max_gfn_exclusive();
gfn_t start = 0;
for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
@@ -923,7 +924,7 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
{
struct tdp_iter iter;
- end = min(end, tdp_mmu_max_gfn_host());
+ end = min(end, tdp_mmu_max_gfn_exclusive());
lockdep_assert_held_write(&kvm->mmu_lock);
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 9e66fba1d6a3..22992b049d38 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -138,6 +138,15 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
return sample_period;
}
+static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
+{
+ if (!pmc->perf_event || pmc->is_paused)
+ return;
+
+ perf_event_period(pmc->perf_event,
+ get_sample_period(pmc, pmc->counter));
+}
+
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
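
The new pmc_update_sample_period() helper centralizes a pattern that was previously open-coded in both vendor set_msr paths (see the svm/pmu.c and vmx/pmu_intel.c hunks below). Typical usage after a guest write to a counter MSR, as those hunks show:

	/* Guest rewrote the counter: fold in the delta, then resync perf. */
	pmc->counter += data - pmc_read_counter(pmc);
	pmc_update_sample_period(pmc);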
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index a1cf9c31273b..421619540ff9 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -837,7 +837,8 @@ bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
BIT(APICV_INHIBIT_REASON_IRQWIN) |
BIT(APICV_INHIBIT_REASON_PIT_REINJ) |
BIT(APICV_INHIBIT_REASON_X2APIC) |
- BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
+ BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |
+ BIT(APICV_INHIBIT_REASON_SEV);
return supported & BIT(reason);
}
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 24eb935b6f85..b14860863c39 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -257,6 +257,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
if (pmc) {
pmc->counter += data - pmc_read_counter(pmc);
+ pmc_update_sample_period(pmc);
return 0;
}
/* MSR_EVNTSELn */
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 75fa6dd268f0..0ad70c12c7c3 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -260,6 +260,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
INIT_LIST_HEAD(&sev->regions_list);
INIT_LIST_HEAD(&sev->mirror_vms);
+ kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_SEV);
+
return 0;
e_free:
@@ -465,6 +467,7 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)
page_virtual = kmap_atomic(pages[i]);
clflush_cache_range(page_virtual, PAGE_SIZE);
kunmap_atomic(page_virtual);
+ cond_resched();
}
}
@@ -2223,51 +2226,47 @@ int sev_cpu_init(struct svm_cpu_data *sd)
* Pages used by hardware to hold guest encrypted state must be flushed before
* returning them to the system.
*/
-static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
- unsigned long len)
+static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
{
+ int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
+
/*
- * If hardware enforced cache coherency for encrypted mappings of the
- * same physical page is supported, nothing to do.
+ * Note! The address must be a kernel address, as regular page walk
+ * checks are performed by VM_PAGE_FLUSH, i.e. operating on a user
+ * address is non-deterministic and unsafe. This function deliberately
+ * takes a pointer to deter passing in a user address.
*/
- if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
- return;
+ unsigned long addr = (unsigned long)va;
/*
- * If the VM Page Flush MSR is supported, use it to flush the page
- * (using the page virtual address and the guest ASID).
+ * If CPU enforced cache coherency for encrypted mappings of the
+ * same physical page is supported, use CLFLUSHOPT instead. NOTE: cache
+ * flush is still needed in order to work properly with DMA devices.
*/
- if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
- struct kvm_sev_info *sev;
- unsigned long va_start;
- u64 start, stop;
-
- /* Align start and stop to page boundaries. */
- va_start = (unsigned long)va;
- start = (u64)va_start & PAGE_MASK;
- stop = PAGE_ALIGN((u64)va_start + len);
-
- if (start < stop) {
- sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
+ if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) {
+ clflush_cache_range(va, PAGE_SIZE);
+ return;
+ }
- while (start < stop) {
- wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
- start | sev->asid);
+ /*
+ * VM Page Flush takes a host virtual address and a guest ASID. Fall
+ * back to WBINVD if this faults so as not to make any problems worse
+ * by leaving stale encrypted data in the cache.
+ */
+ if (WARN_ON_ONCE(wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
+ goto do_wbinvd;
- start += PAGE_SIZE;
- }
+ return;
- return;
- }
+do_wbinvd:
+ wbinvd_on_all_cpus();
+}
- WARN(1, "Address overflow, using WBINVD\n");
- }
+void sev_guest_memory_reclaimed(struct kvm *kvm)
+{
+ if (!sev_guest(kvm))
+ return;
- /*
- * Hardware should always have one of the above features,
- * but if not, use WBINVD and issue a warning.
- */
- WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
wbinvd_on_all_cpus();
}
@@ -2281,7 +2280,8 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
svm = to_svm(vcpu);
if (vcpu->arch.guest_state_protected)
- sev_flush_guest_memory(svm, svm->sev_es.vmsa, PAGE_SIZE);
+ sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);
+
__free_page(virt_to_page(svm->sev_es.vmsa));
if (svm->sev_es.ghcb_sa_free)
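
Condensing the sev_flush_encrypted_page() rework above into a sketch: MSR_AMD64_VM_PAGE_FLUSH takes a host virtual address combined with the guest ASID, and wrmsrl_safe() lets the caller fall back to WBINVD if the flush faults. This is illustrative only (the real function derives the ASID from the VM and relies on va being page-aligned by contract):

	static void flush_encrypted_page_sketch(void *va, int asid)
	{
		unsigned long addr = (unsigned long)va & PAGE_MASK;

		if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) {
			/* Coherent hardware: a cache-line flush suffices. */
			clflush_cache_range(va, PAGE_SIZE);
			return;
		}

		/* Fall back to WBINVD rather than leave stale encrypted data. */
		if (wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid))
			wbinvd_on_all_cpus();
	}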
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index bd4c64b362d2..7e45d03cd018 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4620,6 +4620,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.mem_enc_ioctl = sev_mem_enc_ioctl,
.mem_enc_register_region = sev_mem_enc_register_region,
.mem_enc_unregister_region = sev_mem_enc_unregister_region,
+ .guest_memory_reclaimed = sev_guest_memory_reclaimed,
.vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
.vm_move_enc_context_from = sev_vm_move_enc_context_from,
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index f77a7d2d39dd..f76deff71002 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -609,6 +609,8 @@ int sev_mem_enc_unregister_region(struct kvm *kvm,
struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
+void sev_guest_memory_reclaimed(struct kvm *kvm);
+
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index f18744f7ff82..856c87563883 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4618,6 +4618,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
}
+ if (vmx->nested.update_vmcs01_apicv_status) {
+ vmx->nested.update_vmcs01_apicv_status = false;
+ kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
+ }
+
if ((vm_exit_reason != -1) &&
(enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
vmx->nested.need_vmcs12_to_shadow_sync = true;
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index bc3f8512bb64..b82b6709d7a8 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -431,15 +431,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
!(msr & MSR_PMC_FULL_WIDTH_BIT))
data = (s64)(s32)data;
pmc->counter += data - pmc_read_counter(pmc);
- if (pmc->perf_event && !pmc->is_paused)
- perf_event_period(pmc->perf_event,
- get_sample_period(pmc, data));
+ pmc_update_sample_period(pmc);
return 0;
} else if ((pmc = get_fixed_pmc(pmu, msr))) {
pmc->counter += data - pmc_read_counter(pmc);
- if (pmc->perf_event && !pmc->is_paused)
- perf_event_period(pmc->perf_event,
- get_sample_period(pmc, data));
+ pmc_update_sample_period(pmc);
return 0;
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
if (data == pmc->eventsel)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 04d170c4b61e..d58b763df855 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4174,6 +4174,11 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ if (is_guest_mode(vcpu)) {
+ vmx->nested.update_vmcs01_apicv_status = true;
+ return;
+ }
+
pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
if (cpu_has_secondary_exec_ctrls()) {
if (kvm_vcpu_apicv_active(vcpu))
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 9c6bfcd84008..b98c7e96697a 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -183,6 +183,7 @@ struct nested_vmx {
bool change_vmcs01_virtual_apic_mode;
bool reload_vmcs01_apic_access_page;
bool update_vmcs01_cpu_dirty_logging;
+ bool update_vmcs01_apicv_status;
/*
* Enlightened VMCS has been enabled. It does not mean that L1 has to
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0c0ca599a353..4790f0d7d40b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2901,7 +2901,7 @@ static void kvm_end_pvclock_update(struct kvm *kvm)
static void kvm_update_masterclock(struct kvm *kvm)
{
- kvm_hv_invalidate_tsc_page(kvm);
+ kvm_hv_request_tsc_page_update(kvm);
kvm_start_pvclock_update(kvm);
pvclock_update_vm_gtod_copy(kvm);
kvm_end_pvclock_update(kvm);
@@ -3113,8 +3113,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
offsetof(struct compat_vcpu_info, time));
if (vcpu->xen.vcpu_time_info_set)
kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
- if (!v->vcpu_idx)
- kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
+ kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
return 0;
}
@@ -6241,7 +6240,7 @@ static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
if (data.flags & ~KVM_CLOCK_VALID_FLAGS)
return -EINVAL;
- kvm_hv_invalidate_tsc_page(kvm);
+ kvm_hv_request_tsc_page_update(kvm);
kvm_start_pvclock_update(kvm);
pvclock_update_vm_gtod_copy(kvm);
@@ -8926,7 +8925,7 @@ int kvm_arch_init(void *opaque)
}
kvm_nr_uret_msrs = 0;
- r = kvm_mmu_module_init();
+ r = kvm_mmu_vendor_module_init();
if (r)
goto out_free_percpu;
@@ -8974,7 +8973,7 @@ void kvm_arch_exit(void)
cancel_work_sync(&pvclock_gtod_work);
#endif
kvm_x86_ops.hardware_enable = NULL;
- kvm_mmu_module_exit();
+ kvm_mmu_vendor_module_exit();
free_percpu(user_return_msrs);
kmem_cache_destroy(x86_emulator_cache);
#ifdef CONFIG_KVM_XEN
@@ -9112,7 +9111,7 @@ static void kvm_apicv_init(struct kvm *kvm)
if (!enable_apicv)
set_or_clear_apicv_inhibit(inhibits,
- APICV_INHIBIT_REASON_ABSENT, true);
+ APICV_INHIBIT_REASON_DISABLE, true);
}
static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
@@ -9890,6 +9889,11 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
}
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+{
+ static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
+}
+
static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{
if (!lapic_in_kernel(vcpu))
@@ -10016,12 +10020,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
+ vcpu->run->system_event.ndata = 0;
r = 0;
goto out;
}
if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
+ vcpu->run->system_event.ndata = 0;
r = 0;
goto out;
}
@@ -10098,7 +10104,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
/* Store vcpu->apicv_active before vcpu->mode. */
smp_store_release(&vcpu->mode, IN_GUEST_MODE);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
/*
* 1) We should set ->mode before checking ->requests. Please see
@@ -10129,7 +10135,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
smp_wmb();
local_irq_enable();
preempt_enable();
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
r = 1;
goto cancel_injection;
}
@@ -10255,7 +10261,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
local_irq_enable();
preempt_enable();
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
/*
* Profile KVM exit RIPs:
@@ -10285,7 +10291,7 @@ out:
}
/* Called within kvm->srcu read side. */
-static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
+static inline int vcpu_block(struct kvm_vcpu *vcpu)
{
bool hv_timer;
@@ -10301,12 +10307,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
if (hv_timer)
kvm_lapic_switch_to_sw_timer(vcpu);
- srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
kvm_vcpu_halt(vcpu);
else
kvm_vcpu_block(vcpu);
- vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
if (hv_timer)
kvm_lapic_switch_to_hv_timer(vcpu);
@@ -10348,7 +10354,6 @@ static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
static int vcpu_run(struct kvm_vcpu *vcpu)
{
int r;
- struct kvm *kvm = vcpu->kvm;
vcpu->arch.l1tf_flush_l1d = true;
@@ -10356,7 +10361,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
if (kvm_vcpu_running(vcpu)) {
r = vcpu_enter_guest(vcpu);
} else {
- r = vcpu_block(kvm, vcpu);
+ r = vcpu_block(vcpu);
}
if (r <= 0)
@@ -10375,9 +10380,9 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
}
if (__xfer_to_guest_mode_work_pending()) {
- srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
r = xfer_to_guest_mode_handle_work(vcpu);
- vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
if (r)
return r;
}
@@ -10388,12 +10393,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{
- int r;
-
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
- srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
- return r;
+ return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
}
static int complete_emulated_pio(struct kvm_vcpu *vcpu)
@@ -10485,7 +10485,6 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
struct kvm_run *kvm_run = vcpu->run;
- struct kvm *kvm = vcpu->kvm;
int r;
vcpu_load(vcpu);
@@ -10493,7 +10492,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_run->flags = 0;
kvm_load_guest_fpu(vcpu);
- vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
if (kvm_run->immediate_exit) {
r = -EINTR;
@@ -10505,9 +10504,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu));
- srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
kvm_vcpu_block(vcpu);
- vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+ kvm_vcpu_srcu_read_lock(vcpu);
if (kvm_apic_accept_events(vcpu) < 0) {
r = 0;
@@ -10568,7 +10567,7 @@ out:
if (kvm_run->kvm_valid_regs)
store_regs(vcpu);
post_kvm_run_save(vcpu);
- srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ kvm_vcpu_srcu_read_unlock(vcpu);
kvm_sigset_deactivate(vcpu);
vcpu_put(vcpu);
@@ -10986,6 +10985,9 @@ static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
struct kvm_vcpu *vcpu;
unsigned long i;
+ if (!enable_apicv)
+ return;
+
down_write(&kvm->arch.apicv_update_lock);
kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -11197,8 +11199,21 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
if (r < 0)
goto fail_mmu_destroy;
- if (kvm_apicv_activated(vcpu->kvm))
+
+ /*
+ * Defer evaluating inhibits until the vCPU is first run, as
+ * this vCPU will not get notified of any changes until this
+ * vCPU is visible to other vCPUs (marked online and added to
+ * the set of vCPUs). Opportunistically mark APICv active as
+ * VMX in particular is highly unlikely to have inhibits.
+ * Ignore the current per-VM APICv state so that vCPU creation
+ * is guaranteed to run with a deterministic value, the request
+ * will ensure the vCPU gets the correct state before VM-Entry.
+ */
+ if (enable_apicv) {
vcpu->arch.apicv_active = true;
+ kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
+ }
} else
static_branch_inc(&kvm_has_noapic_vcpu);
@@ -11996,8 +12011,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- if (change == KVM_MR_CREATE || change == KVM_MR_MOVE)
+ if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) {
+ if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn())
+ return -EINVAL;
+
return kvm_alloc_memslot_metadata(kvm, new);
+ }
if (change == KVM_MR_FLAGS_ONLY)
memcpy(&new->arch, &old->arch, sizeof(old->arch));
@@ -12986,3 +13005,19 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
+
+static int __init kvm_x86_init(void)
+{
+ kvm_mmu_x86_module_init();
+ return 0;
+}
+module_init(kvm_x86_init);
+
+static void __exit kvm_x86_exit(void)
+{
+ /*
+ * If module_init() is implemented, module_exit() must also be
+ * implemented to allow module unload.
+ */
+}
+module_exit(kvm_x86_exit);
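
The empty kvm_x86_exit() above is not dead weight: a module that registers module_init() without a matching module_exit() is treated as non-unloadable. A minimal illustration of the pairing, as a generic example rather than KVM code:

	#include <linux/module.h>

	static int __init demo_init(void)
	{
		return 0;	/* nonzero would fail the load */
	}

	static void __exit demo_exit(void)
	{
		/* Present solely so the module can be unloaded. */
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");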
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 8ca5ecf16dc4..9dec1b38a98f 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -53,12 +53,12 @@
SYM_FUNC_START(copy_user_generic_unrolled)
ASM_STAC
cmpl $8,%edx
- jb 20f /* less then 8 bytes, go to byte copy loop */
+ jb .Lcopy_user_short_string_bytes
ALIGN_DESTINATION
movl %edx,%ecx
andl $63,%edx
shrl $6,%ecx
- jz .L_copy_short_string
+ jz copy_user_short_string
1: movq (%rsi),%r8
2: movq 1*8(%rsi),%r9
3: movq 2*8(%rsi),%r10
@@ -79,37 +79,11 @@ SYM_FUNC_START(copy_user_generic_unrolled)
leaq 64(%rdi),%rdi
decl %ecx
jnz 1b
-.L_copy_short_string:
- movl %edx,%ecx
- andl $7,%edx
- shrl $3,%ecx
- jz 20f
-18: movq (%rsi),%r8
-19: movq %r8,(%rdi)
- leaq 8(%rsi),%rsi
- leaq 8(%rdi),%rdi
- decl %ecx
- jnz 18b
-20: andl %edx,%edx
- jz 23f
- movl %edx,%ecx
-21: movb (%rsi),%al
-22: movb %al,(%rdi)
- incq %rsi
- incq %rdi
- decl %ecx
- jnz 21b
-23: xor %eax,%eax
- ASM_CLAC
- RET
+ jmp copy_user_short_string
30: shll $6,%ecx
addl %ecx,%edx
- jmp 60f
-40: leal (%rdx,%rcx,8),%edx
- jmp 60f
-50: movl %ecx,%edx
-60: jmp .Lcopy_user_handle_tail /* ecx is zerorest also */
+ jmp .Lcopy_user_handle_tail
_ASM_EXTABLE_CPY(1b, 30b)
_ASM_EXTABLE_CPY(2b, 30b)
@@ -127,10 +101,6 @@ SYM_FUNC_START(copy_user_generic_unrolled)
_ASM_EXTABLE_CPY(14b, 30b)
_ASM_EXTABLE_CPY(15b, 30b)
_ASM_EXTABLE_CPY(16b, 30b)
- _ASM_EXTABLE_CPY(18b, 40b)
- _ASM_EXTABLE_CPY(19b, 40b)
- _ASM_EXTABLE_CPY(21b, 50b)
- _ASM_EXTABLE_CPY(22b, 50b)
SYM_FUNC_END(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)
@@ -191,7 +161,7 @@ EXPORT_SYMBOL(copy_user_generic_string)
SYM_FUNC_START(copy_user_enhanced_fast_string)
ASM_STAC
/* CPUs without FSRM should avoid rep movsb for short copies */
- ALTERNATIVE "cmpl $64, %edx; jb .L_copy_short_string", "", X86_FEATURE_FSRM
+ ALTERNATIVE "cmpl $64, %edx; jb copy_user_short_string", "", X86_FEATURE_FSRM
movl %edx,%ecx
1: rep movsb
xorl %eax,%eax
@@ -244,6 +214,53 @@ SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
SYM_CODE_END(.Lcopy_user_handle_tail)
/*
+ * Finish memcpy of less than 64 bytes. #AC should already be set.
+ *
+ * Input:
+ * rdi destination
+ * rsi source
+ * rdx count (< 64)
+ *
+ * Output:
+ * eax uncopied bytes or 0 if successful.
+ */
+SYM_CODE_START_LOCAL(copy_user_short_string)
+ movl %edx,%ecx
+ andl $7,%edx
+ shrl $3,%ecx
+ jz .Lcopy_user_short_string_bytes
+18: movq (%rsi),%r8
+19: movq %r8,(%rdi)
+ leaq 8(%rsi),%rsi
+ leaq 8(%rdi),%rdi
+ decl %ecx
+ jnz 18b
+.Lcopy_user_short_string_bytes:
+ andl %edx,%edx
+ jz 23f
+ movl %edx,%ecx
+21: movb (%rsi),%al
+22: movb %al,(%rdi)
+ incq %rsi
+ incq %rdi
+ decl %ecx
+ jnz 21b
+23: xor %eax,%eax
+ ASM_CLAC
+ RET
+
+40: leal (%rdx,%rcx,8),%edx
+ jmp 60f
+50: movl %ecx,%edx /* ecx is zerorest also */
+60: jmp .Lcopy_user_handle_tail
+
+ _ASM_EXTABLE_CPY(18b, 40b)
+ _ASM_EXTABLE_CPY(19b, 40b)
+ _ASM_EXTABLE_CPY(21b, 50b)
+ _ASM_EXTABLE_CPY(22b, 50b)
+SYM_CODE_END(copy_user_short_string)
+
+/*
* copy_user_nocache - Uncached memory copy with exception handling
* This will force destination out of cache for more performance.
*
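
For readers who do not speak AT&T assembly, the copy_user_short_string logic above amounts to a word loop followed by a byte loop. A hedged C rendition (userspace, and without the fault handling that is the real reason the asm exists):

	#include <stddef.h>
	#include <string.h>

	/* Copy len (< 64) bytes; returns 0 like the asm's success path. */
	static size_t copy_short_sketch(void *dst, const void *src, size_t len)
	{
		unsigned char *d = dst;
		const unsigned char *s = src;
		size_t words = len >> 3;

		while (words--) {	/* 8-byte chunks, labels 18/19 */
			memcpy(d, s, 8);
			d += 8;
			s += 8;
		}
		len &= 7;
		while (len--)		/* tail bytes, labels 21/22 */
			*d++ = *s++;
		return 0;		/* eax: uncopied bytes */
	}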
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index ecb2049c1273..b7dfd60243b7 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -48,6 +48,7 @@ SYM_FUNC_START(__put_user_1)
cmp %_ASM_BX,%_ASM_CX
jae .Lbad_put_user
SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL)
+ ENDBR
ASM_STAC
1: movb %al,(%_ASM_CX)
xor %ecx,%ecx
@@ -62,6 +63,7 @@ SYM_FUNC_START(__put_user_2)
cmp %_ASM_BX,%_ASM_CX
jae .Lbad_put_user
SYM_INNER_LABEL(__put_user_nocheck_2, SYM_L_GLOBAL)
+ ENDBR
ASM_STAC
2: movw %ax,(%_ASM_CX)
xor %ecx,%ecx
@@ -76,6 +78,7 @@ SYM_FUNC_START(__put_user_4)
cmp %_ASM_BX,%_ASM_CX
jae .Lbad_put_user
SYM_INNER_LABEL(__put_user_nocheck_4, SYM_L_GLOBAL)
+ ENDBR
ASM_STAC
3: movl %eax,(%_ASM_CX)
xor %ecx,%ecx
@@ -90,6 +93,7 @@ SYM_FUNC_START(__put_user_8)
cmp %_ASM_BX,%_ASM_CX
jae .Lbad_put_user
SYM_INNER_LABEL(__put_user_nocheck_8, SYM_L_GLOBAL)
+ ENDBR
ASM_STAC
4: mov %_ASM_AX,(%_ASM_CX)
#ifdef CONFIG_X86_32
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 5f87bab4fb8d..b2b2366885a2 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -31,6 +31,7 @@
.align RETPOLINE_THUNK_SIZE
SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
UNWIND_HINT_EMPTY
+ ANNOTATE_NOENDBR
ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
__stringify(RETPOLINE \reg), X86_FEATURE_RETPOLINE, \
@@ -55,7 +56,6 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
.align RETPOLINE_THUNK_SIZE
SYM_CODE_START(__x86_indirect_thunk_array)
- ANNOTATE_NOENDBR // apply_retpolines
#define GEN(reg) THUNK reg
#include <asm/GEN-for-each-reg.h>
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 0402a749f3a0..0ae6cf804197 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -119,7 +119,7 @@ void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
/* cache copy and flush to align dest */
if (!IS_ALIGNED(dest, 8)) {
- unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);
+ size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);
memcpy((void *) dest, (void *) source, len);
clean_cache_range((void *) dest, len);
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index abf5ed76e4b7..0656db33574d 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -638,17 +638,6 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
}
EXPORT_SYMBOL_GPL(lookup_address);
-/*
- * Lookup the page table entry for a virtual address in a given mm. Return a
- * pointer to the entry and the level of the mapping.
- */
-pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address,
- unsigned int *level)
-{
- return lookup_address_in_pgd(pgd_offset(mm, address), address, level);
-}
-EXPORT_SYMBOL_GPL(lookup_address_in_mm);
-
static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
unsigned int *level)
{
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 9bb1e2941179..b94f727251b6 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -467,7 +467,6 @@ static __init void xen_setup_pci_msi(void)
else
xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs;
xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs;
- pci_msi_ignore_mask = 1;
} else if (xen_hvm_domain()) {
xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs;
xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs;
@@ -481,6 +480,11 @@ static __init void xen_setup_pci_msi(void)
* in allocating the native domain and never use it.
*/
x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain;
+ /*
+ * With XEN PIRQ/Eventchannels in use, PCI/MSI[-X] masking is solely
+ * controlled by the hypervisor.
+ */
+ pci_msi_ignore_mask = 1;
}
#else /* CONFIG_PCI_MSI */
diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index 72c1e42d121d..7fe564eaf228 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -50,6 +50,7 @@
#define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8)
SYM_CODE_START_LOCAL(pvh_start_xen)
+ UNWIND_HINT_EMPTY
cld
lgdt (_pa(gdt))
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 3822666fb73d..bb176c72891c 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -25,6 +25,7 @@
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>
+#include <asm/microcode.h>
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
@@ -262,11 +263,18 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
x86_platform.restore_sched_clock_state();
mtrr_bp_restore();
perf_restore_debug_store();
- msr_restore_context(ctxt);
c = &cpu_data(smp_processor_id());
if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
init_ia32_feat_ctl(c);
+
+ microcode_bsp_resume();
+
+ /*
+ * This needs to happen after the microcode has been updated upon resume
+ * because some of the MSRs are "emulated" in microcode.
+ */
+ msr_restore_context(ctxt);
}
/* Needed by apm.c */
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index ac17196e2518..3a2cd93bf059 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -45,6 +45,7 @@ SYM_CODE_END(hypercall_page)
__INIT
SYM_CODE_START(startup_xen)
UNWIND_HINT_EMPTY
+ ANNOTATE_NOENDBR
cld
/* Clear .bss */
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 45cc0ae0af6f..c7b9f12896f2 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -29,7 +29,7 @@
.if XTENSA_HAVE_COPROCESSOR(x); \
.align 4; \
.Lsave_cp_regs_cp##x: \
- xchal_cp##x##_store a2 a4 a5 a6 a7; \
+ xchal_cp##x##_store a2 a3 a4 a5 a6; \
jx a0; \
.endif
@@ -46,7 +46,7 @@
.if XTENSA_HAVE_COPROCESSOR(x); \
.align 4; \
.Lload_cp_regs_cp##x: \
- xchal_cp##x##_load a2 a4 a5 a6 a7; \
+ xchal_cp##x##_load a2 a3 a4 a5 a6; \
jx a0; \
.endif
diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c
index 0dde21e0d3de..ad1841cecdfb 100644
--- a/arch/xtensa/kernel/jump_label.c
+++ b/arch/xtensa/kernel/jump_label.c
@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data)
{
struct patch *patch = data;
- if (atomic_inc_return(&patch->cpu_count) == 1) {
+ if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
local_patch_text(patch->addr, patch->data, patch->sz);
atomic_inc(&patch->cpu_count);
} else {
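
The one-line xtensa fix above is subtle: with the old "== 1" check, the first CPU into the stop_machine() callback patched the text while later CPUs might still be executing it. Requiring the counter to reach num_online_cpus() makes the last arrival do the patching, when every other CPU is guaranteed to be spinning. A sketch of the fixed rendezvous, mirroring the hunk:

	static int rendezvous_sketch(void *data)
	{
		struct patch *patch = data;

		if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
			/* Last one in: everyone else is parked in the loop. */
			local_patch_text(patch->addr, patch->data, patch->sz);
			atomic_inc(&patch->cpu_count);	/* release the spinners */
		} else {
			while (atomic_read(&patch->cpu_count) <= num_online_cpus())
				cpu_relax();
		}
		return 0;
	}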
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 81d7c7e8f7e9..10b79d3c74e0 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -36,24 +36,19 @@ static void rs_poll(struct timer_list *);
static struct tty_driver *serial_driver;
static struct tty_port serial_port;
static DEFINE_TIMER(serial_timer, rs_poll);
-static DEFINE_SPINLOCK(timer_lock);
static int rs_open(struct tty_struct *tty, struct file * filp)
{
- spin_lock_bh(&timer_lock);
if (tty->count == 1)
mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
- spin_unlock_bh(&timer_lock);
return 0;
}
static void rs_close(struct tty_struct *tty, struct file * filp)
{
- spin_lock_bh(&timer_lock);
if (tty->count == 1)
del_timer_sync(&serial_timer);
- spin_unlock_bh(&timer_lock);
}
@@ -73,8 +68,6 @@ static void rs_poll(struct timer_list *unused)
int rd = 1;
unsigned char c;
- spin_lock(&timer_lock);
-
while (simc_poll(0)) {
rd = simc_read(0, &c, 1);
if (rd <= 0)
@@ -87,7 +80,6 @@ static void rs_poll(struct timer_list *unused)
tty_flip_buffer_push(port);
if (rd)
mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
- spin_unlock(&timer_lock);
}
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 2e0dd68a3cbe..1f62dbdc521f 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -569,7 +569,7 @@ static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
struct bfq_entity *entity = &bfqq->entity;
struct bfq_entity *inline_entities[BFQ_LIMIT_INLINE_DEPTH];
struct bfq_entity **entities = inline_entities;
- int depth, level;
+ int depth, level, alloc_depth = BFQ_LIMIT_INLINE_DEPTH;
int class_idx = bfqq->ioprio_class - 1;
struct bfq_sched_data *sched_data;
unsigned long wsum;
@@ -578,15 +578,21 @@ static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
if (!entity->on_st_or_in_serv)
return false;
+retry:
+ spin_lock_irq(&bfqd->lock);
/* +1 for bfqq entity, root cgroup not included */
depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1;
- if (depth > BFQ_LIMIT_INLINE_DEPTH) {
+ if (depth > alloc_depth) {
+ spin_unlock_irq(&bfqd->lock);
+ if (entities != inline_entities)
+ kfree(entities);
entities = kmalloc_array(depth, sizeof(*entities), GFP_NOIO);
if (!entities)
return false;
+ alloc_depth = depth;
+ goto retry;
}
- spin_lock_irq(&bfqd->lock);
sched_data = entity->sched_data;
/* Gather our ancestors as we need to traverse them in reverse order */
level = 0;
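The hunk above turns a check done before taking bfqd->lock into the standard drop-lock/allocate/retry idiom: the cgroup depth read under the lock can grow once the lock is dropped for kmalloc_array(), so the size is re-validated after reacquiring. A hedged, self-contained sketch of the idiom with hypothetical names:

	static bool sized_scan(spinlock_t *lock)
	{
		int *buf = NULL, allocated = 0, needed;

	retry:
		spin_lock_irq(lock);
		needed = current_depth();	/* hypothetical; only valid under the lock */
		if (needed > allocated) {
			spin_unlock_irq(lock);
			kfree(buf);		/* kfree(NULL) is a no-op */
			buf = kmalloc_array(needed, sizeof(*buf), GFP_NOIO);
			if (!buf)
				return false;
			allocated = needed;
			goto retry;		/* depth may have changed; re-check */
		}
		/* ... walk up to needed entries while still holding the lock ... */
		spin_unlock_irq(lock);
		kfree(buf);
		return true;
	}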
diff --git a/block/bio.c b/block/bio.c
index cdd7b2915c53..4259125e16ab 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1598,7 +1598,7 @@ EXPORT_SYMBOL(bio_split);
void bio_trim(struct bio *bio, sector_t offset, sector_t size)
{
if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
- offset + size > bio->bi_iter.bi_size))
+ offset + size > bio_sectors(bio)))
return;
size <<= 9;
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 70a0a3d680a3..9bd670999d0a 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -2322,7 +2322,17 @@ static void ioc_timer_fn(struct timer_list *timer)
iocg->hweight_donating = hwa;
iocg->hweight_after_donation = new_hwi;
list_add(&iocg->surplus_list, &surpluses);
- } else {
+ } else if (!iocg->abs_vdebt) {
+ /*
+ * @iocg doesn't have enough to donate. Reset
+ * its inuse to active.
+ *
+ * Don't reset debtors as their inuse's are
+ * owned by debt handling. This shouldn't affect
+			 * donation calculation in any meaningful way
+ * as @iocg doesn't have a meaningful amount of
+ * share anyway.
+ */
TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
iocg->inuse, iocg->active,
iocg->hweight_inuse, new_hwi);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ed3ed86f7dd2..84d749511f55 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -794,7 +794,8 @@ bool blk_update_request(struct request *req, blk_status_t error,
#endif
if (unlikely(error && !blk_rq_is_passthrough(req) &&
- !(req->rq_flags & RQF_QUIET))) {
+ !(req->rq_flags & RQF_QUIET)) &&
+ !test_bit(GD_DEAD, &req->q->disk->state)) {
blk_print_req_error(req, error);
trace_block_rq_error(req, error, nr_bytes);
}
@@ -1130,14 +1131,7 @@ void blk_mq_start_request(struct request *rq)
trace_block_rq_issue(rq);
if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
- u64 start_time;
-#ifdef CONFIG_BLK_CGROUP
- if (rq->bio)
- start_time = bio_issue_time(&rq->bio->bi_issue);
- else
-#endif
- start_time = ktime_get_ns();
- rq->io_start_time_ns = start_time;
+ rq->io_start_time_ns = ktime_get_ns();
rq->stats_sectors = blk_rq_sectors(rq);
rq->rq_flags |= RQF_STATS;
rq_qos_issue(q, rq);
diff --git a/block/ioctl.c b/block/ioctl.c
index 4a86340133e4..f8703db99c73 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -629,7 +629,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return compat_put_long(argp,
(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
case BLKGETSIZE:
- if (bdev_nr_sectors(bdev) > ~0UL)
+ if (bdev_nr_sectors(bdev) > ~(compat_ulong_t)0)
return -EFBIG;
return compat_put_ulong(argp, bdev_nr_sectors(bdev));
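The compat fix above matters because on a 64-bit kernel ~0UL is 64 bits of ones, so the overflow check for the 32-bit BLKGETSIZE result could never fire; ~(compat_ulong_t)0 is the real 32-bit ceiling. Hedged illustration (nr_sectors is a hypothetical value):

	u64 nr_sectors = 0x100000001ULL;	/* just over 2^32 sectors */

	/* On a 64-bit kernel ~0UL == 0xffffffffffffffff, so this is
	 * false and the overflow goes undetected: */
	bool old_check = nr_sectors > ~0UL;			/* false */

	/* ~(compat_ulong_t)0 == 0xffffffff, the true 32-bit limit: */
	bool new_check = nr_sectors > ~(compat_ulong_t)0;	/* true -> -EFBIG */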
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 4556c86c3465..eb95e188d62b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -96,11 +96,6 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
(void *)1},
- /* T40 can not handle C3 idle state */
- { set_max_cstate, "IBM ThinkPad T40", {
- DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
- DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")},
- (void *)2},
{},
};
@@ -795,7 +790,8 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
cx->type == ACPI_STATE_C3) {
state->enter_dead = acpi_idle_play_dead;
- drv->safe_state_index = count;
+ if (cx->type != ACPI_STATE_C3)
+ drv->safe_state_index = count;
}
/*
* Halt-induced C1 is not good for ->enter_s2idle, because it
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8351c5638880..f3b639e89dd8 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2295,6 +2295,7 @@ static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
{
int ret = 0;
struct binder_sg_copy *sgc, *tmpsgc;
+ struct binder_ptr_fixup *tmppf;
struct binder_ptr_fixup *pf =
list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
node);
@@ -2349,7 +2350,11 @@ static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
list_del(&sgc->node);
kfree(sgc);
}
- BUG_ON(!list_empty(pf_head));
+ list_for_each_entry_safe(pf, tmppf, pf_head, node) {
+ BUG_ON(pf->skip_size == 0);
+ list_del(&pf->node);
+ kfree(pf);
+ }
BUG_ON(!list_empty(sgc_head));
return ret > 0 ? -EINVAL : ret;
@@ -2486,6 +2491,9 @@ static int binder_translate_fd_array(struct list_head *pf_head,
struct binder_proc *proc = thread->proc;
int ret;
+ if (fda->num_fds == 0)
+ return 0;
+
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 0c5a51970fbf..014ccb0f45dc 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -77,6 +77,8 @@ static int marvell_cable_detect(struct ata_port *ap)
switch(ap->port_no)
{
case 0:
+ if (!ap->ioaddr.bmdma_addr)
+ return ATA_CBL_PATA_UNK;
if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
return ATA_CBL_PATA40;
return ATA_CBL_PATA80;
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1d6636ebaac5..f73b836047cf 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -667,6 +667,15 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
core_mask = &cpu_topology[cpu].llc_sibling;
}
+ /*
+ * For systems with no shared cpu-side LLC but with clusters defined,
+ * extend core_mask to cluster_siblings. The sched domain builder will
+ * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
+ */
+ if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
+ cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
+ core_mask = &cpu_topology[cpu].cluster_sibling;
+
return core_mask;
}
@@ -684,7 +693,7 @@ void update_siblings_masks(unsigned int cpuid)
for_each_online_cpu(cpu) {
cpu_topo = &cpu_topology[cpu];
- if (cpuid_topo->llc_id == cpu_topo->llc_id) {
+ if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) {
cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index af6bea56f4e2..3fc3b5940bb3 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -296,6 +296,7 @@ int driver_deferred_probe_check_state(struct device *dev)
return -EPROBE_DEFER;
}
+EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index e9d1efcda89b..ac6ad9ab67f9 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -152,9 +152,19 @@ static struct attribute *default_attrs[] = {
NULL
};
+static umode_t topology_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ if (attr == &dev_attr_ppin.attr && !topology_ppin(kobj_to_dev(kobj)->id))
+ return 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group topology_attr_group = {
.attrs = default_attrs,
.bin_attrs = bin_attrs,
+ .is_visible = topology_is_visible,
.name = "topology"
};
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 519b6d38d4df..fdb81f2794cd 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -33,6 +33,22 @@ config BLK_DEV_FD
To compile this driver as a module, choose M here: the
module will be called floppy.
+config BLK_DEV_FD_RAWCMD
+ bool "Support for raw floppy disk commands (DEPRECATED)"
+ depends on BLK_DEV_FD
+ help
+ If you want to use actual physical floppies and expect to do
+ special low-level hardware accesses to them (access and use
+ non-standard formats, for example), then enable this.
+
+ Note that the code enabled by this option is rarely used and
+ might be unstable or insecure, and distros should not enable it.
+
+ Note: FDRAWCMD is deprecated and will be removed from the kernel
+ in the near future.
+
+ If unsure, say N.
+
config AMIGA_FLOPPY
tristate "Amiga floppy support"
depends on AMIGA
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 8c647532e3ce..d5b9ff9bcbb2 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2982,6 +2982,8 @@ static const char *drive_name(int type, int drive)
return "(null)";
}
+#ifdef CONFIG_BLK_DEV_FD_RAWCMD
+
/* raw commands */
static void raw_cmd_done(int flag)
{
@@ -3181,6 +3183,35 @@ static int raw_cmd_ioctl(int cmd, void __user *param)
return ret;
}
+static int floppy_raw_cmd_ioctl(int type, int drive, int cmd,
+ void __user *param)
+{
+ int ret;
+
+ pr_warn_once("Note: FDRAWCMD is deprecated and will be removed from the kernel in the near future.\n");
+
+ if (type)
+ return -EINVAL;
+ if (lock_fdc(drive))
+ return -EINTR;
+ set_floppy(drive);
+ ret = raw_cmd_ioctl(cmd, param);
+ if (ret == -EINTR)
+ return -EINTR;
+ process_fd_request();
+ return ret;
+}
+
+#else /* CONFIG_BLK_DEV_FD_RAWCMD */
+
+static int floppy_raw_cmd_ioctl(int type, int drive, int cmd,
+ void __user *param)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif
+
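The restructuring above is the usual compile-out idiom: fd_locked_ioctl() keeps a single call site, and the #else branch supplies a stub returning -EOPNOTSUPP when CONFIG_BLK_DEV_FD_RAWCMD is off. The generic shape of the idiom, with hypothetical names:

	#ifdef CONFIG_FEATURE_X			/* hypothetical option */
	static int feature_ioctl(void __user *arg)
	{
		return do_feature(arg);		/* real implementation */
	}
	#else
	static int feature_ioctl(void __user *arg)
	{
		return -EOPNOTSUPP;		/* feature compiled out */
	}
	#endif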
static int invalidate_drive(struct block_device *bdev)
{
/* invalidate the buffer track to force a reread */
@@ -3369,7 +3400,6 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
{
int drive = (long)bdev->bd_disk->private_data;
int type = ITYPE(drive_state[drive].fd_device);
- int i;
int ret;
int size;
union inparam {
@@ -3520,16 +3550,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
outparam = &write_errors[drive];
break;
case FDRAWCMD:
- if (type)
- return -EINVAL;
- if (lock_fdc(drive))
- return -EINTR;
- set_floppy(drive);
- i = raw_cmd_ioctl(cmd, (void __user *)param);
- if (i == -EINTR)
- return -EINTR;
- process_fd_request();
- return i;
+ return floppy_raw_cmd_ioctl(type, drive, cmd, (void __user *)param);
case FDTWADDLE:
if (lock_fdc(drive))
return -EINTR;
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 05b1120e6623..c441a4972064 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1600,7 +1600,7 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
* Only fake timeouts need to execute blk_mq_complete_request() here.
*/
cmd->error = BLK_STS_TIMEOUT;
- if (cmd->fake_timeout)
+ if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)
blk_mq_complete_request(rq);
return BLK_EH_DONE;
}
diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c
index 5e0e4393ce4d..0cfe859a4ac4 100644
--- a/drivers/bus/fsl-mc/fsl-mc-msi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-msi.c
@@ -224,8 +224,12 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev, unsigned int irq_count)
if (error)
return error;
+ msi_lock_descs(dev);
if (msi_first_desc(dev, MSI_DESC_ALL))
- return -EINVAL;
+ error = -EINVAL;
+ msi_unlock_descs(dev);
+ if (error)
+ return error;
/*
* NOTE: Calling this function will trigger the invocation of the
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 60fbd42041dd..828c66bbaa67 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -352,8 +352,7 @@ static int of_weim_notify(struct notifier_block *nb, unsigned long action,
pdev = of_find_device_by_node(rd->dn);
if (!pdev) {
- dev_err(&pdev->dev,
- "Could not find platform device for '%pOF'\n",
+ pr_err("Could not find platform device for '%pOF'\n",
rd->dn);
ret = notifier_from_errno(-EINVAL);
@@ -370,7 +369,7 @@ static int of_weim_notify(struct notifier_block *nb, unsigned long action,
return ret;
}
-struct notifier_block weim_of_notifier = {
+static struct notifier_block weim_of_notifier = {
.notifier_call = of_weim_notify,
};
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 9527b7d63840..541ced27d941 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -1060,6 +1060,7 @@ static int __maybe_unused mhi_pci_freeze(struct device *dev)
* the intermediate restore kernel reinitializes MHI device with new
* context.
*/
+ flush_work(&mhi_pdev->recovery_work);
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
mhi_power_down(mhi_cntrl, true);
mhi_unprepare_after_power_down(mhi_cntrl);
@@ -1085,6 +1086,7 @@ static const struct dev_pm_ops mhi_pci_pm_ops = {
.resume = mhi_pci_resume,
.freeze = mhi_pci_freeze,
.thaw = mhi_pci_restore,
+ .poweroff = mhi_pci_freeze,
.restore = mhi_pci_restore,
#endif
};
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 4566e730ef2b..60b082fe2ed0 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -227,6 +227,8 @@ static struct sunxi_rsb_device *sunxi_rsb_device_create(struct sunxi_rsb *rsb,
dev_dbg(&rdev->dev, "device %s registered\n", dev_name(&rdev->dev));
+ return rdev;
+
err_device_add:
put_device(&rdev->dev);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 54c0ee6dda30..7a1b1f9e4933 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -3232,13 +3232,27 @@ static int sysc_check_disabled_devices(struct sysc *ddata)
*/
static int sysc_check_active_timer(struct sysc *ddata)
{
+ int error;
+
if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
ddata->cap->type != TI_SYSC_OMAP4_TIMER)
return 0;
+ /*
+ * Quirk for omap3 beagleboard revision A to B4 to use gpt12.
+ * Revision C and later are fixed with commit 23885389dbbb ("ARM:
+ * dts: Fix timer regression for beagleboard revision c"). This all
+ * can be dropped if we stop supporting old beagleboard revisions
+ * A to B4 at some point.
+ */
+ if (sysc_soc->soc == SOC_3430)
+ error = -ENXIO;
+ else
+ error = -EBUSY;
+
if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
(ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
- return -ENXIO;
+ return error;
return 0;
}
diff --git a/drivers/char/random.c b/drivers/char/random.c
index e15063d61460..4c9adb4f3d5d 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -318,6 +318,13 @@ static void crng_reseed(bool force)
* the resultant ChaCha state to the user, along with the second
* half of the block containing 32 bytes of random data that may
* be used; random_data_len may not be greater than 32.
+ *
+ * The returned ChaCha state contains within it a copy of the old
+ * key value, at index 4, so the state should always be zeroed out
+ * immediately after use in order to maintain forward secrecy.
+ * If the state cannot be erased in a timely manner, then it is
+ * safer to set the random_data parameter to &chacha_state[4] so
+ * that this function overwrites it before returning.
*/
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
u32 chacha_state[CHACHA_STATE_WORDS],
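A hedged usage sketch of the contract the new comment describes, assuming the function's remaining parameters are u8 *random_data, size_t random_data_len (they fall outside this hunk):

	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 key[CHACHA_KEY_SIZE];

	/* If the state cannot be zeroed promptly, aim random_data at
	 * &chacha_state[4] so the old key copy is overwritten in place: */
	crng_fast_key_erasure(key, chacha_state, (u8 *)&chacha_state[4], 32);
	/* ...consume the 32 random bytes now at &chacha_state[4]... */
	memzero_explicit(chacha_state, sizeof(chacha_state));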
@@ -523,8 +530,7 @@ EXPORT_SYMBOL(get_random_bytes);
static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
{
- ssize_t ret = 0;
- size_t len;
+ size_t len, left, ret = 0;
u32 chacha_state[CHACHA_STATE_WORDS];
u8 output[CHACHA_BLOCK_SIZE];
@@ -543,37 +549,40 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
* the user directly.
*/
if (nbytes <= CHACHA_KEY_SIZE) {
- ret = copy_to_user(buf, &chacha_state[4], nbytes) ? -EFAULT : nbytes;
+ ret = nbytes - copy_to_user(buf, &chacha_state[4], nbytes);
goto out_zero_chacha;
}
- do {
+ for (;;) {
chacha20_block(chacha_state, output);
if (unlikely(chacha_state[12] == 0))
++chacha_state[13];
len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
- if (copy_to_user(buf, output, len)) {
- ret = -EFAULT;
+ left = copy_to_user(buf, output, len);
+ if (left) {
+ ret += len - left;
break;
}
- nbytes -= len;
buf += len;
ret += len;
+ nbytes -= len;
+ if (!nbytes)
+ break;
BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
- if (!(ret % PAGE_SIZE) && nbytes) {
+ if (ret % PAGE_SIZE == 0) {
if (signal_pending(current))
break;
cond_resched();
}
- } while (nbytes);
+ }
memzero_explicit(output, sizeof(output));
out_zero_chacha:
memzero_explicit(chacha_state, sizeof(chacha_state));
- return ret;
+ return ret ? ret : -EFAULT;
}
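The rewritten loop adopts the read(2)-style short-count contract: a fault after some bytes were delivered returns the byte count, and -EFAULT is reserved for the case where nothing was copied at all (hence the final `return ret ? ret : -EFAULT;`). A minimal sketch of the contract, relying only on copy_to_user() returning the number of bytes *not* copied:

	static ssize_t copy_out(void __user *buf, const u8 *src, size_t n)
	{
		size_t left = copy_to_user(buf, src, n);	/* bytes NOT copied */

		if (left == n)
			return -EFAULT;		/* nothing went through */
		return n - left;		/* short count on a partial fault */
	}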
/*
@@ -1016,7 +1025,7 @@ int __init rand_initialize(void)
*/
void add_device_randomness(const void *buf, size_t size)
{
- cycles_t cycles = random_get_entropy();
+ unsigned long cycles = random_get_entropy();
unsigned long flags, now = jiffies;
if (crng_init == 0 && size)
@@ -1047,8 +1056,7 @@ struct timer_rand_state {
*/
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
- cycles_t cycles = random_get_entropy();
- unsigned long flags, now = jiffies;
+ unsigned long cycles = random_get_entropy(), now = jiffies, flags;
long delta, delta2, delta3;
spin_lock_irqsave(&input_pool.lock, flags);
@@ -1337,8 +1345,7 @@ static void mix_interrupt_randomness(struct work_struct *work)
void add_interrupt_randomness(int irq)
{
enum { MIX_INFLIGHT = 1U << 31 };
- cycles_t cycles = random_get_entropy();
- unsigned long now = jiffies;
+ unsigned long cycles = random_get_entropy(), now = jiffies;
struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
struct pt_regs *regs = get_irq_regs();
unsigned int new_count;
@@ -1351,16 +1358,12 @@ void add_interrupt_randomness(int irq)
if (cycles == 0)
cycles = get_reg(fast_pool, regs);
- if (sizeof(cycles) == 8)
+ if (sizeof(unsigned long) == 8) {
irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq;
- else {
+ irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
+ } else {
irq_data.u32[0] = cycles ^ irq;
irq_data.u32[1] = now;
- }
-
- if (sizeof(unsigned long) == 8)
- irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
- else {
irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_;
irq_data.u32[3] = get_reg(fast_pool, regs);
}
@@ -1407,7 +1410,7 @@ static void entropy_timer(struct timer_list *t)
static void try_to_generate_entropy(void)
{
struct {
- cycles_t cycles;
+ unsigned long cycles;
struct timer_list timer;
} stack;
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
index aa1561b773d6..070c3b896559 100644
--- a/drivers/clk/microchip/clk-mpfs.c
+++ b/drivers/clk/microchip/clk-mpfs.c
@@ -11,20 +11,48 @@
#include <dt-bindings/clock/microchip,mpfs-clock.h>
/* address offset of control registers */
+#define REG_MSSPLL_REF_CR 0x08u
+#define REG_MSSPLL_POSTDIV_CR 0x10u
+#define REG_MSSPLL_SSCG_2_CR 0x2Cu
#define REG_CLOCK_CONFIG_CR 0x08u
+#define REG_RTC_CLOCK_CR 0x0Cu
#define REG_SUBBLK_CLOCK_CR 0x84u
#define REG_SUBBLK_RESET_CR 0x88u
+#define MSSPLL_FBDIV_SHIFT 0x00u
+#define MSSPLL_FBDIV_WIDTH 0x0Cu
+#define MSSPLL_REFDIV_SHIFT 0x08u
+#define MSSPLL_REFDIV_WIDTH 0x06u
+#define MSSPLL_POSTDIV_SHIFT 0x08u
+#define MSSPLL_POSTDIV_WIDTH 0x07u
+#define MSSPLL_FIXED_DIV 4u
+
struct mpfs_clock_data {
void __iomem *base;
+ void __iomem *msspll_base;
struct clk_hw_onecell_data hw_data;
};
+struct mpfs_msspll_hw_clock {
+ void __iomem *base;
+ unsigned int id;
+ u32 reg_offset;
+ u32 shift;
+ u32 width;
+ u32 flags;
+ struct clk_hw hw;
+ struct clk_init_data init;
+};
+
+#define to_mpfs_msspll_clk(_hw) container_of(_hw, struct mpfs_msspll_hw_clock, hw)
+
struct mpfs_cfg_clock {
const struct clk_div_table *table;
unsigned int id;
+ u32 reg_offset;
u8 shift;
u8 width;
+ u8 flags;
};
struct mpfs_cfg_hw_clock {
@@ -55,7 +83,7 @@ struct mpfs_periph_hw_clock {
*/
static DEFINE_SPINLOCK(mpfs_clk_lock);
-static const struct clk_parent_data mpfs_cfg_parent[] = {
+static const struct clk_parent_data mpfs_ext_ref[] = {
{ .index = 0 },
};
@@ -69,6 +97,86 @@ static const struct clk_div_table mpfs_div_ahb_table[] = {
{ 0, 0 }
};
+/*
+ * The only two supported reference clock frequencies for the PolarFire SoC are
+ * 100 and 125 MHz, as the rtc reference is required to be 1 MHz.
+ * The divider table therefore only needs entries corresponding to
+ * divide-by-100 and divide-by-125.
+ */
+static const struct clk_div_table mpfs_div_rtcref_table[] = {
+ { 100, 100 }, { 125, 125 },
+ { 0, 0 }
+};
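A quick worked check of the table above, using the two reference frequencies named in the comment:

	/*
	 * With a 100 MHz reference:  100 MHz / 100 = 1 MHz rtc reference.
	 * With a 125 MHz reference:  125 MHz / 125 = 1 MHz rtc reference.
	 * Each { val, div } entry maps register value val to divide-by-div,
	 * hence only { 100, 100 } and { 125, 125 } are needed.
	 */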
+
+static unsigned long mpfs_clk_msspll_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct mpfs_msspll_hw_clock *msspll_hw = to_mpfs_msspll_clk(hw);
+ void __iomem *mult_addr = msspll_hw->base + msspll_hw->reg_offset;
+ void __iomem *ref_div_addr = msspll_hw->base + REG_MSSPLL_REF_CR;
+ void __iomem *postdiv_addr = msspll_hw->base + REG_MSSPLL_POSTDIV_CR;
+ u32 mult, ref_div, postdiv;
+
+ mult = readl_relaxed(mult_addr) >> MSSPLL_FBDIV_SHIFT;
+ mult &= clk_div_mask(MSSPLL_FBDIV_WIDTH);
+ ref_div = readl_relaxed(ref_div_addr) >> MSSPLL_REFDIV_SHIFT;
+ ref_div &= clk_div_mask(MSSPLL_REFDIV_WIDTH);
+ postdiv = readl_relaxed(postdiv_addr) >> MSSPLL_POSTDIV_SHIFT;
+ postdiv &= clk_div_mask(MSSPLL_POSTDIV_WIDTH);
+
+ return prate * mult / (ref_div * MSSPLL_FIXED_DIV * postdiv);
+}
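The recalc above implements rate = prate * mult / (ref_div * MSSPLL_FIXED_DIV * postdiv). A hedged worked example with hypothetical register values:

	/*
	 * prate = 125 MHz, mult (FBDIV) = 48, ref_div (REFDIV) = 2, postdiv = 2:
	 * rate = 125,000,000 * 48 / (2 * 4 * 2) = 375,000,000 Hz
	 * (MSSPLL_FIXED_DIV = 4 contributes the constant factor).
	 */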
+
+static const struct clk_ops mpfs_clk_msspll_ops = {
+ .recalc_rate = mpfs_clk_msspll_recalc_rate,
+};
+
+#define CLK_PLL(_id, _name, _parent, _shift, _width, _flags, _offset) { \
+ .id = _id, \
+ .shift = _shift, \
+ .width = _width, \
+ .reg_offset = _offset, \
+ .flags = _flags, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parent, &mpfs_clk_msspll_ops, 0), \
+}
+
+static struct mpfs_msspll_hw_clock mpfs_msspll_clks[] = {
+ CLK_PLL(CLK_MSSPLL, "clk_msspll", mpfs_ext_ref, MSSPLL_FBDIV_SHIFT,
+ MSSPLL_FBDIV_WIDTH, 0, REG_MSSPLL_SSCG_2_CR),
+};
+
+static int mpfs_clk_register_msspll(struct device *dev, struct mpfs_msspll_hw_clock *msspll_hw,
+ void __iomem *base)
+{
+ msspll_hw->base = base;
+
+ return devm_clk_hw_register(dev, &msspll_hw->hw);
+}
+
+static int mpfs_clk_register_mssplls(struct device *dev, struct mpfs_msspll_hw_clock *msspll_hws,
+ unsigned int num_clks, struct mpfs_clock_data *data)
+{
+ void __iomem *base = data->msspll_base;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_clks; i++) {
+ struct mpfs_msspll_hw_clock *msspll_hw = &msspll_hws[i];
+
+ ret = mpfs_clk_register_msspll(dev, msspll_hw, base);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register msspll id: %d\n",
+ CLK_MSSPLL);
+
+ data->hw_data.hws[msspll_hw->id] = &msspll_hw->hw;
+ }
+
+ return 0;
+}
+
+/*
+ * "CFG" clocks
+ */
+
static unsigned long mpfs_cfg_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
{
struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
@@ -76,10 +184,10 @@ static unsigned long mpfs_cfg_clk_recalc_rate(struct clk_hw *hw, unsigned long p
void __iomem *base_addr = cfg_hw->sys_base;
u32 val;
- val = readl_relaxed(base_addr + REG_CLOCK_CONFIG_CR) >> cfg->shift;
+ val = readl_relaxed(base_addr + cfg->reg_offset) >> cfg->shift;
val &= clk_div_mask(cfg->width);
- return prate / (1u << val);
+ return divider_recalc_rate(hw, prate, val, cfg->table, cfg->flags, cfg->width);
}
static long mpfs_cfg_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
@@ -105,11 +213,10 @@ static int mpfs_cfg_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned
return divider_setting;
spin_lock_irqsave(&mpfs_clk_lock, flags);
-
- val = readl_relaxed(base_addr + REG_CLOCK_CONFIG_CR);
+ val = readl_relaxed(base_addr + cfg->reg_offset);
val &= ~(clk_div_mask(cfg->width) << cfg_hw->cfg.shift);
val |= divider_setting << cfg->shift;
- writel_relaxed(val, base_addr + REG_CLOCK_CONFIG_CR);
+ writel_relaxed(val, base_addr + cfg->reg_offset);
spin_unlock_irqrestore(&mpfs_clk_lock, flags);
@@ -122,19 +229,33 @@ static const struct clk_ops mpfs_clk_cfg_ops = {
.set_rate = mpfs_cfg_clk_set_rate,
};
-#define CLK_CFG(_id, _name, _parent, _shift, _width, _table, _flags) { \
- .cfg.id = _id, \
- .cfg.shift = _shift, \
- .cfg.width = _width, \
- .cfg.table = _table, \
- .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parent, &mpfs_clk_cfg_ops, \
- _flags), \
+#define CLK_CFG(_id, _name, _parent, _shift, _width, _table, _flags, _offset) { \
+ .cfg.id = _id, \
+ .cfg.shift = _shift, \
+ .cfg.width = _width, \
+ .cfg.table = _table, \
+ .cfg.reg_offset = _offset, \
+ .cfg.flags = _flags, \
+ .hw.init = CLK_HW_INIT(_name, _parent, &mpfs_clk_cfg_ops, 0), \
}
static struct mpfs_cfg_hw_clock mpfs_cfg_clks[] = {
- CLK_CFG(CLK_CPU, "clk_cpu", mpfs_cfg_parent, 0, 2, mpfs_div_cpu_axi_table, 0),
- CLK_CFG(CLK_AXI, "clk_axi", mpfs_cfg_parent, 2, 2, mpfs_div_cpu_axi_table, 0),
- CLK_CFG(CLK_AHB, "clk_ahb", mpfs_cfg_parent, 4, 2, mpfs_div_ahb_table, 0),
+ CLK_CFG(CLK_CPU, "clk_cpu", "clk_msspll", 0, 2, mpfs_div_cpu_axi_table, 0,
+ REG_CLOCK_CONFIG_CR),
+ CLK_CFG(CLK_AXI, "clk_axi", "clk_msspll", 2, 2, mpfs_div_cpu_axi_table, 0,
+ REG_CLOCK_CONFIG_CR),
+ CLK_CFG(CLK_AHB, "clk_ahb", "clk_msspll", 4, 2, mpfs_div_ahb_table, 0,
+ REG_CLOCK_CONFIG_CR),
+ {
+ .cfg.id = CLK_RTCREF,
+ .cfg.shift = 0,
+ .cfg.width = 12,
+ .cfg.table = mpfs_div_rtcref_table,
+ .cfg.reg_offset = REG_RTC_CLOCK_CR,
+ .cfg.flags = CLK_DIVIDER_ONE_BASED,
+ .hw.init =
+ CLK_HW_INIT_PARENTS_DATA("clk_rtcref", mpfs_ext_ref, &mpfs_clk_cfg_ops, 0),
+ }
};
static int mpfs_clk_register_cfg(struct device *dev, struct mpfs_cfg_hw_clock *cfg_hw,
@@ -160,13 +281,17 @@ static int mpfs_clk_register_cfgs(struct device *dev, struct mpfs_cfg_hw_clock *
return dev_err_probe(dev, ret, "failed to register clock id: %d\n",
cfg_hw->cfg.id);
- id = cfg_hws[i].cfg.id;
+ id = cfg_hw->cfg.id;
data->hw_data.hws[id] = &cfg_hw->hw;
}
return 0;
}
+/*
+ * peripheral clocks - devices connected to axi or ahb buses.
+ */
+
static int mpfs_periph_clk_enable(struct clk_hw *hw)
{
struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
@@ -200,10 +325,6 @@ static void mpfs_periph_clk_disable(struct clk_hw *hw)
spin_lock_irqsave(&mpfs_clk_lock, flags);
- reg = readl_relaxed(base_addr + REG_SUBBLK_RESET_CR);
- val = reg | (1u << periph->shift);
- writel_relaxed(val, base_addr + REG_SUBBLK_RESET_CR);
-
reg = readl_relaxed(base_addr + REG_SUBBLK_CLOCK_CR);
val = reg & ~(1u << periph->shift);
writel_relaxed(val, base_addr + REG_SUBBLK_CLOCK_CR);
@@ -249,8 +370,10 @@ static const struct clk_ops mpfs_periph_clk_ops = {
* trap handler
* - CLK_MMUART0: reserved by the hss
* - CLK_DDRC: provides clock to the ddr subsystem
- * - CLK_FICx: these provide clocks for sections of the fpga fabric, disabling them would
- * cause the fabric to go into reset
+ * - CLK_FICx: these provide the processor-side clocks to the "FIC" (Fabric InterConnect)
+ *   clock domain crossers, which provide the interface to the FPGA fabric. Disabling them
+ * causes the FPGA fabric to go into reset.
+ * - CLK_ATHENA: The athena clock is FIC4, which is reserved for the Athena TeraFire.
*/
static struct mpfs_periph_hw_clock mpfs_periph_clks[] = {
@@ -258,7 +381,7 @@ static struct mpfs_periph_hw_clock mpfs_periph_clks[] = {
CLK_PERIPH(CLK_MAC0, "clk_periph_mac0", PARENT_CLK(AHB), 1, 0),
CLK_PERIPH(CLK_MAC1, "clk_periph_mac1", PARENT_CLK(AHB), 2, 0),
CLK_PERIPH(CLK_MMC, "clk_periph_mmc", PARENT_CLK(AHB), 3, 0),
- CLK_PERIPH(CLK_TIMER, "clk_periph_timer", PARENT_CLK(AHB), 4, 0),
+ CLK_PERIPH(CLK_TIMER, "clk_periph_timer", PARENT_CLK(RTCREF), 4, 0),
CLK_PERIPH(CLK_MMUART0, "clk_periph_mmuart0", PARENT_CLK(AHB), 5, CLK_IS_CRITICAL),
CLK_PERIPH(CLK_MMUART1, "clk_periph_mmuart1", PARENT_CLK(AHB), 6, 0),
CLK_PERIPH(CLK_MMUART2, "clk_periph_mmuart2", PARENT_CLK(AHB), 7, 0),
@@ -277,11 +400,11 @@ static struct mpfs_periph_hw_clock mpfs_periph_clks[] = {
CLK_PERIPH(CLK_GPIO1, "clk_periph_gpio1", PARENT_CLK(AHB), 21, 0),
CLK_PERIPH(CLK_GPIO2, "clk_periph_gpio2", PARENT_CLK(AHB), 22, 0),
CLK_PERIPH(CLK_DDRC, "clk_periph_ddrc", PARENT_CLK(AHB), 23, CLK_IS_CRITICAL),
- CLK_PERIPH(CLK_FIC0, "clk_periph_fic0", PARENT_CLK(AHB), 24, CLK_IS_CRITICAL),
- CLK_PERIPH(CLK_FIC1, "clk_periph_fic1", PARENT_CLK(AHB), 25, CLK_IS_CRITICAL),
- CLK_PERIPH(CLK_FIC2, "clk_periph_fic2", PARENT_CLK(AHB), 26, CLK_IS_CRITICAL),
- CLK_PERIPH(CLK_FIC3, "clk_periph_fic3", PARENT_CLK(AHB), 27, CLK_IS_CRITICAL),
- CLK_PERIPH(CLK_ATHENA, "clk_periph_athena", PARENT_CLK(AHB), 28, 0),
+ CLK_PERIPH(CLK_FIC0, "clk_periph_fic0", PARENT_CLK(AXI), 24, CLK_IS_CRITICAL),
+ CLK_PERIPH(CLK_FIC1, "clk_periph_fic1", PARENT_CLK(AXI), 25, CLK_IS_CRITICAL),
+ CLK_PERIPH(CLK_FIC2, "clk_periph_fic2", PARENT_CLK(AXI), 26, CLK_IS_CRITICAL),
+ CLK_PERIPH(CLK_FIC3, "clk_periph_fic3", PARENT_CLK(AXI), 27, CLK_IS_CRITICAL),
+ CLK_PERIPH(CLK_ATHENA, "clk_periph_athena", PARENT_CLK(AXI), 28, CLK_IS_CRITICAL),
CLK_PERIPH(CLK_CFM, "clk_periph_cfm", PARENT_CLK(AHB), 29, 0),
};
@@ -322,8 +445,9 @@ static int mpfs_clk_probe(struct platform_device *pdev)
unsigned int num_clks;
int ret;
- /* CLK_RESERVED is not part of cfg_clks nor periph_clks, so add 1 */
- num_clks = ARRAY_SIZE(mpfs_cfg_clks) + ARRAY_SIZE(mpfs_periph_clks) + 1;
+ /* CLK_RESERVED is not part of clock arrays, so add 1 */
+ num_clks = ARRAY_SIZE(mpfs_msspll_clks) + ARRAY_SIZE(mpfs_cfg_clks)
+ + ARRAY_SIZE(mpfs_periph_clks) + 1;
clk_data = devm_kzalloc(dev, struct_size(clk_data, hw_data.hws, num_clks), GFP_KERNEL);
if (!clk_data)
@@ -333,8 +457,17 @@ static int mpfs_clk_probe(struct platform_device *pdev)
if (IS_ERR(clk_data->base))
return PTR_ERR(clk_data->base);
+ clk_data->msspll_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(clk_data->msspll_base))
+ return PTR_ERR(clk_data->msspll_base);
+
clk_data->hw_data.num = num_clks;
+ ret = mpfs_clk_register_mssplls(dev, mpfs_msspll_clks, ARRAY_SIZE(mpfs_msspll_clks),
+ clk_data);
+ if (ret)
+ return ret;
+
ret = mpfs_clk_register_cfgs(dev, mpfs_cfg_clks, ARRAY_SIZE(mpfs_cfg_clks), clk_data);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index f675fd969c4d..e9c357309fd9 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -818,7 +818,7 @@ EXPORT_SYMBOL_GPL(clk_pixel_ops);
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct clk_rate_request parent_req = { };
+ struct clk_rate_request parent_req = { .min_rate = 0, .max_rate = ULONG_MAX };
struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
struct clk_hw *xo, *p0, *p1, *p2;
unsigned long p0_rate;
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
index 8a10bade7e0d..2f3ddc908ebd 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
@@ -241,6 +241,7 @@ static struct clk_init_data rtc_32k_init_data = {
.ops = &ccu_mux_ops,
.parent_hws = rtc_32k_parents,
.num_parents = ARRAY_SIZE(rtc_32k_parents), /* updated during probe */
+ .flags = CLK_IS_CRITICAL,
};
static struct ccu_mux rtc_32k_clk = {
@@ -346,6 +347,7 @@ static const struct of_device_id sun6i_rtc_ccu_match[] = {
.compatible = "allwinner,sun50i-r329-rtc",
.data = &sun50i_r329_rtc_ccu_data,
},
+ {},
};
int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg)
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index 542b31d6e96d..636bcf2439ef 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -109,6 +109,8 @@ static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
spin_lock_init(&data->lock);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -EINVAL;
/* one clock/reset pair per word */
count = DIV_ROUND_UP((resource_size(r)), SUN9I_MMC_WIDTH);
data->membase = devm_ioremap_resource(&pdev->dev, r);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index f9d593ff4718..0253731d6d25 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -24,13 +24,17 @@
#define CLK_HW_DIV 2
#define LUT_TURBO_IND 1
+#define GT_IRQ_STATUS BIT(2)
+
#define HZ_PER_KHZ 1000
struct qcom_cpufreq_soc_data {
u32 reg_enable;
+ u32 reg_domain_state;
u32 reg_dcvs_ctrl;
u32 reg_freq_lut;
u32 reg_volt_lut;
+ u32 reg_intr_clr;
u32 reg_current_vote;
u32 reg_perf_state;
u8 lut_row_size;
@@ -280,37 +284,46 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
}
}
-static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
+static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
{
- unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote);
+ unsigned int lval;
+
+ if (data->soc_data->reg_current_vote)
+ lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
+ else
+ lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;
- return (val & 0x3FF) * 19200;
+ return lval * xo_rate;
}
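With the change above the helper returns Hz directly (lval * xo_rate) instead of kHz with a hardcoded 19.2 MHz crystal (lval * 19200). A hedged worked example with a hypothetical LUT value:

	/*
	 * xo_rate = 19,200,000 Hz, lval = 78:
	 * throttled rate = 78 * 19,200,000 = 1,497,600,000 Hz (~1.5 GHz).
	 * The old "lval * 19200" yielded the same figure in kHz.
	 */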
static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
struct cpufreq_policy *policy = data->policy;
- int cpu = cpumask_first(policy->cpus);
+ int cpu = cpumask_first(policy->related_cpus);
struct device *dev = get_cpu_device(cpu);
unsigned long freq_hz, throttled_freq;
struct dev_pm_opp *opp;
- unsigned int freq;
/*
* Get the h/w throttled frequency, normalize it using the
* registered opp table and use it to calculate thermal pressure.
*/
- freq = qcom_lmh_get_throttle_freq(data);
- freq_hz = freq * HZ_PER_KHZ;
+ freq_hz = qcom_lmh_get_throttle_freq(data);
opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
- dev_pm_opp_find_freq_ceil(dev, &freq_hz);
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);
+
+ if (IS_ERR(opp)) {
+ dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
+ } else {
+ throttled_freq = freq_hz / HZ_PER_KHZ;
- throttled_freq = freq_hz / HZ_PER_KHZ;
+ /* Update thermal pressure (the boost frequencies are accepted) */
+ arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
- /* Update thermal pressure (the boost frequencies are accepted) */
- arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
+ dev_pm_opp_put(opp);
+ }
/*
* In the unlikely case policy is unregistered do not enable
@@ -350,6 +363,10 @@ static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
disable_irq_nosync(c_data->throttle_irq);
schedule_delayed_work(&c_data->throttle_work, 0);
+ if (c_data->soc_data->reg_intr_clr)
+ writel_relaxed(GT_IRQ_STATUS,
+ c_data->base + c_data->soc_data->reg_intr_clr);
+
return IRQ_HANDLED;
}
@@ -365,9 +382,11 @@ static const struct qcom_cpufreq_soc_data qcom_soc_data = {
static const struct qcom_cpufreq_soc_data epss_soc_data = {
.reg_enable = 0x0,
+ .reg_domain_state = 0x20,
.reg_dcvs_ctrl = 0xb0,
.reg_freq_lut = 0x100,
.reg_volt_lut = 0x200,
+ .reg_intr_clr = 0x308,
.reg_perf_state = 0x320,
.lut_row_size = 4,
};
@@ -417,16 +436,39 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
return 0;
}
-static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
+static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
+{
+ struct qcom_cpufreq_data *data = policy->driver_data;
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ int ret;
+
+ ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
+ data->irq_name, data->throttle_irq);
+
+ return ret;
+}
+
+static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
{
+ struct qcom_cpufreq_data *data = policy->driver_data;
+
if (data->throttle_irq <= 0)
- return;
+ return 0;
mutex_lock(&data->throttle_lock);
data->cancel_throttle = true;
mutex_unlock(&data->throttle_lock);
cancel_delayed_work_sync(&data->throttle_work);
+ irq_set_affinity_hint(data->throttle_irq, NULL);
+
+ return 0;
+}
+
+static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
+{
free_irq(data->throttle_irq, data);
}
@@ -583,6 +625,8 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.get = qcom_cpufreq_hw_get,
.init = qcom_cpufreq_hw_cpu_init,
.exit = qcom_cpufreq_hw_cpu_exit,
+ .online = qcom_cpufreq_hw_cpu_online,
+ .offline = qcom_cpufreq_hw_cpu_offline,
.register_em = cpufreq_register_em_with_opp,
.fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw",
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 2deed8d8773f..75e1bf3a08f7 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -98,8 +98,10 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
return -ENOMEM;
ret = sun50i_cpufreq_get_efuse(&speed);
- if (ret)
+ if (ret) {
+ kfree(opp_tables);
return ret;
+ }
snprintf(name, MAX_NAME_LEN, "speed%d", speed);
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index b459eda2cd37..5c852e671992 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -22,6 +22,7 @@
#include <linux/pm_runtime.h>
#include <asm/cpuidle.h>
#include <asm/sbi.h>
+#include <asm/smp.h>
#include <asm/suspend.h>
#include "dt_idle_states.h"
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 1476156af74b..def564d1e8fa 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1453,7 +1453,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
- struct at_xdmac_desc *desc, *_desc;
+ struct at_xdmac_desc *desc, *_desc, *iter;
struct list_head *descs_list;
enum dma_status ret;
int residue, retry;
@@ -1568,11 +1568,13 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
* microblock.
*/
descs_list = &desc->descs_list;
- list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
- dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
- residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
- if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
+ list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
+ dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
+ residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
+ if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
+ desc = iter;
break;
+ }
}
residue += cur_ubc << dwidth;
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 329fc2e57b70..33bc1e6c4cf2 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -414,14 +414,18 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH_32(dw, chan->dir, chan->id, ch_control1,
(DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
/* Linked list */
+
#ifdef CONFIG_64BIT
- SET_CH_64(dw, chan->dir, chan->id, llp.reg,
- chunk->ll_region.paddr);
+	/* llp is not 64-bit aligned -> keep 32-bit accesses */
+ SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+ lower_32_bits(chunk->ll_region.paddr));
+ SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+ upper_32_bits(chunk->ll_region.paddr));
#else /* CONFIG_64BIT */
- SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
- lower_32_bits(chunk->ll_region.paddr));
- SET_CH_32(dw, chan->dir, chan->id, llp.msb,
- upper_32_bits(chunk->ll_region.paddr));
+ SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+ lower_32_bits(chunk->ll_region.paddr));
+ SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+ upper_32_bits(chunk->ll_region.paddr));
#endif /* CONFIG_64BIT */
}
/* Doorbell */
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 3061fe857d69..f652da6ab47d 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -373,7 +373,6 @@ static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
{
lockdep_assert_held(&wq->wq_lock);
- idxd_wq_disable_cleanup(wq);
wq->size = 0;
wq->group = NULL;
}
@@ -701,14 +700,17 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
if (wq->state == IDXD_WQ_ENABLED) {
idxd_wq_disable_cleanup(wq);
- idxd_wq_device_reset_cleanup(wq);
wq->state = IDXD_WQ_DISABLED;
}
+ idxd_wq_device_reset_cleanup(wq);
}
}
void idxd_device_clear_state(struct idxd_device *idxd)
{
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return;
+
idxd_groups_clear_state(idxd);
idxd_engines_clear_state(idxd);
idxd_device_wqs_clear_state(idxd);
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index e289fd48711a..c01db23e3333 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -150,14 +150,15 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
*/
int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc)
{
- int rc, retries = 0;
+ unsigned int retries = wq->enqcmds_retries;
+ int rc;
do {
rc = enqcmds(portal, desc);
if (rc == 0)
break;
cpu_relax();
- } while (retries++ < wq->enqcmds_retries);
+ } while (retries--);
return rc;
}
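The rework above snapshots wq->enqcmds_retries once instead of re-reading it as the loop bound, so a concurrent sysfs update cannot change the bound mid-loop; the retry budget is still bound + 1 total attempts. A sketch of the snapshot idiom (try_submit() is a hypothetical stand-in for enqcmds()):

	unsigned int retries = wq->enqcmds_retries;	/* snapshot once */

	do {
		if (try_submit() == 0)		/* success: stop retrying */
			break;
		cpu_relax();
	} while (retries--);			/* at most retries + 1 attempts */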
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 7e19ab92b61a..dfd549685c46 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -905,6 +905,9 @@ static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attr
u64 xfer_size;
int rc;
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
@@ -939,6 +942,9 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
u64 batch_size;
int rc;
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 70c0aa931ddf..6196a7b3956b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -198,12 +198,12 @@ struct sdma_script_start_addrs {
s32 per_2_firi_addr;
s32 mcu_2_firi_addr;
s32 uart_2_per_addr;
- s32 uart_2_mcu_ram_addr;
+ s32 uart_2_mcu_addr;
s32 per_2_app_addr;
s32 mcu_2_app_addr;
s32 per_2_per_addr;
s32 uartsh_2_per_addr;
- s32 uartsh_2_mcu_ram_addr;
+ s32 uartsh_2_mcu_addr;
s32 per_2_shp_addr;
s32 mcu_2_shp_addr;
s32 ata_2_mcu_addr;
@@ -232,8 +232,8 @@ struct sdma_script_start_addrs {
s32 mcu_2_ecspi_addr;
s32 mcu_2_sai_addr;
s32 sai_2_mcu_addr;
- s32 uart_2_mcu_addr;
- s32 uartsh_2_mcu_addr;
+ s32 uart_2_mcu_rom_addr;
+ s32 uartsh_2_mcu_rom_addr;
/* End of v3 array */
s32 mcu_2_zqspi_addr;
/* End of v4 array */
@@ -1796,17 +1796,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
saddr_arr[i] = addr_arr[i];
/*
- * get uart_2_mcu_addr/uartsh_2_mcu_addr rom script specially because
- * they are now replaced by uart_2_mcu_ram_addr/uartsh_2_mcu_ram_addr
- * to be compatible with legacy freescale/nxp sdma firmware, and they
- * are located in the bottom part of sdma_script_start_addrs which are
- * beyond the SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1.
+	 * For compatibility with the NXP internal legacy kernel (before 4.19),
+	 * which is based on the uart ram script, and the mainline kernel, which
+	 * is based on the uart rom script, both uart ram/rom scripts are present
+	 * in newer sdma firmware. Use the rom versions if they are present
+	 * (V3 or newer).
*/
- if (addr->uart_2_mcu_addr)
- sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_addr;
- if (addr->uartsh_2_mcu_addr)
- sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_addr;
-
+ if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) {
+ if (addr->uart_2_mcu_rom_addr)
+ sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr;
+ if (addr->uartsh_2_mcu_rom_addr)
+ sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr;
+ }
}
static void sdma_load_firmware(const struct firmware *fw, void *context)
@@ -1885,7 +1885,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
u32 reg, val, shift, num_map, i;
int ret = 0;
- if (IS_ERR(np) || IS_ERR(gpr_np))
+ if (IS_ERR(np) || !gpr_np)
goto out;
event_remap = of_find_property(np, propname, NULL);
@@ -1933,7 +1933,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
}
out:
- if (!IS_ERR(gpr_np))
+ if (gpr_np)
of_node_put(gpr_np);
return ret;
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index 375e7e647df6..a1517ef1f4a0 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
unsigned int status;
int ret;
- ret = pm_runtime_get_sync(mtkd->ddev.dev);
+ ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
if (ret < 0) {
pm_runtime_put_noidle(chan->device->dev);
return ret;
@@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
ret = readx_poll_timeout(readl, c->base + VFF_EN,
status, !status, 10, 100);
if (ret)
- return ret;
+ goto err_pm;
ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
if (ret < 0) {
dev_err(chan->device->dev, "Can't request dma IRQ\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_pm;
}
if (mtkd->support_33bits)
mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
+err_pm:
+ pm_runtime_put_noidle(mtkd->ddev.dev);
return ret;
}
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index f05ff02c0656..40b1abeca856 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -164,6 +164,11 @@
#define ECC_STAT_CECNT_SHIFT 8
#define ECC_STAT_BITNUM_MASK 0x7F
+/* ECC error count register definitions */
+#define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
+#define ECC_ERRCNT_UECNT_SHIFT 16
+#define ECC_ERRCNT_CECNT_MASK 0xFFFF
+
/* DDR QOS Interrupt register definitions */
#define DDR_QOS_IRQ_STAT_OFST 0x20200
#define DDR_QOSUE_MASK 0x4
@@ -423,15 +428,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
base = priv->baseaddr;
p = &priv->stat;
+ regval = readl(base + ECC_ERRCNT_OFST);
+ p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
+ p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
+ if (!p->ce_cnt)
+ goto ue_err;
+
regval = readl(base + ECC_STAT_OFST);
if (!regval)
return 1;
- p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
- p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
- if (!p->ce_cnt)
- goto ue_err;
-
p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
regval = readl(base + ECC_CEADDR0_OFST);
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index cf6fed6dec77..45600acc0f45 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -49,7 +49,7 @@ struct scmi_msg_resp_clock_describe_rates {
struct {
__le32 value_low;
__le32 value_high;
- } rate[0];
+ } rate[];
#define RATE_TO_U64(X) \
({ \
typeof(X) x = (X); \
@@ -210,7 +210,8 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
if (rate_discrete && rate) {
clk->list.num_rates = tot_rate_cnt;
- sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL);
+ sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
+ rate_cmp_func, NULL);
}
clk->rate_discrete = rate_discrete;
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 46118300a4d1..e17c6568344d 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -679,7 +679,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
if (IS_ERR(xfer)) {
- scmi_clear_channel(info, cinfo);
+ if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
+ scmi_clear_channel(info, cinfo);
return;
}
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
index 734f1eeee161..8302a2b4aeeb 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -405,8 +405,8 @@ static int scmi_optee_chan_free(int id, void *p, void *data)
return 0;
}
-static struct scmi_shared_mem *get_channel_shm(struct scmi_optee_channel *chan,
- struct scmi_xfer *xfer)
+static struct scmi_shared_mem __iomem *
+get_channel_shm(struct scmi_optee_channel *chan, struct scmi_xfer *xfer)
{
if (!chan)
return NULL;
@@ -419,7 +419,7 @@ static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
- struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
+ struct scmi_shared_mem __iomem *shmem = get_channel_shm(channel, xfer);
int ret;
mutex_lock(&channel->mu);
@@ -436,7 +436,7 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
struct scmi_xfer *xfer)
{
struct scmi_optee_channel *channel = cinfo->transport_info;
- struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
+ struct scmi_shared_mem __iomem *shmem = get_channel_shm(channel, xfer);
shmem_fetch_response(shmem, xfer);
}
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index e48108e694f8..7dad6f57d970 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -955,8 +955,7 @@ static int cs_dsp_create_control(struct cs_dsp *dsp,
ctl->alg_region = *alg_region;
if (subname && dsp->fw_ver >= 2) {
ctl->subname_len = subname_len;
- ctl->subname = kmemdup(subname,
- strlen(subname) + 1, GFP_KERNEL);
+ ctl->subname = kasprintf(GFP_KERNEL, "%.*s", subname_len, subname);
if (!ctl->subname) {
ret = -ENOMEM;
goto err_ctl;
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 8e5d87984a48..41c31b10ae84 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -134,7 +134,7 @@ static int gpio_sim_get_multiple(struct gpio_chip *gc,
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
mutex_lock(&chip->lock);
- bitmap_copy(bits, chip->value_map, gc->ngpio);
+ bitmap_replace(bits, bits, chip->value_map, mask, gc->ngpio);
mutex_unlock(&chip->lock);
return 0;
@@ -146,7 +146,7 @@ static void gpio_sim_set_multiple(struct gpio_chip *gc,
struct gpio_sim_chip *chip = gpiochip_get_data(gc);
mutex_lock(&chip->lock);
- bitmap_copy(chip->value_map, bits, gc->ngpio);
+ bitmap_replace(chip->value_map, chip->value_map, bits, mask, gc->ngpio);
mutex_unlock(&chip->lock);
}
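bitmap_copy() ignored the caller's mask and moved all ngpio bits; bitmap_replace(dst, old, new, mask, nbits) computes dst = (old & ~mask) | (new & mask), touching only the requested lines. A hedged bit-level illustration:

	/*
	 * old  = 0b00001111, new = 0b11110000, mask = 0b10100101:
	 * dst  = (old & ~mask) | (new & mask)
	 *      = 0b00001010    | 0b10100000
	 *      = 0b10101010
	 * Only the bits set in mask take their value from new; the rest
	 * keep their old value.
	 */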
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index a5495ad31c9c..c2523ac26fac 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -108,7 +108,7 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
* controller does not have GPIO chip registered at the moment. This is to
* support probe deferral.
*/
-static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
+static struct gpio_desc *acpi_get_gpiod(char *path, unsigned int pin)
{
struct gpio_chip *chip;
acpi_handle handle;
@@ -136,7 +136,7 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
* as it is intended for use outside of the GPIO layer (in a similar fashion to
* gpiod_get_index() for example) it also holds a reference to the GPIO device.
*/
-struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label)
+struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label)
{
struct gpio_desc *gpio;
int ret;
@@ -317,11 +317,12 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
return desc;
}
-static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
+static bool acpi_gpio_in_ignore_list(const char *controller_in, unsigned int pin_in)
{
const char *controller, *pin_str;
- int len, pin;
+ unsigned int pin;
char *endp;
+ int len;
controller = ignore_wake;
while (controller) {
@@ -354,13 +355,13 @@ err:
static bool acpi_gpio_irq_is_wake(struct device *parent,
struct acpi_resource_gpio *agpio)
{
- int pin = agpio->pin_table[0];
+ unsigned int pin = agpio->pin_table[0];
if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
return false;
if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) {
- dev_info(parent, "Ignoring wakeup on pin %d\n", pin);
+ dev_info(parent, "Ignoring wakeup on pin %u\n", pin);
return false;
}
@@ -378,7 +379,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
struct acpi_gpio_event *event;
irq_handler_t handler = NULL;
struct gpio_desc *desc;
- int ret, pin, irq;
+ unsigned int pin;
+ int ret, irq;
if (!acpi_gpio_get_irq_resource(ares, &agpio))
return AE_OK;
@@ -387,8 +389,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
pin = agpio->pin_table[0];
if (pin <= 255) {
- char ev_name[5];
- sprintf(ev_name, "_%c%02hhX",
+ char ev_name[8];
+ sprintf(ev_name, "_%c%02X",
agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
pin);
if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
@@ -1098,7 +1100,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
length = min_t(u16, agpio->pin_table_length, pin_index + bits);
for (i = pin_index; i < length; ++i) {
- int pin = agpio->pin_table[i];
+ unsigned int pin = agpio->pin_table[i];
struct acpi_gpio_connection *conn;
struct gpio_desc *desc;
bool found;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 085348e08986..b7694171655c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1601,8 +1601,6 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
gpiochip_set_irq_hooks(gc);
- acpi_gpiochip_request_interrupts(gc);
-
/*
* Using barrier() here to prevent compiler from reordering
* gc->irq.initialized before initialization of above
@@ -1612,6 +1610,8 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
gc->irq.initialized = true;
+ acpi_gpiochip_request_interrupts(gc);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 40e2c6e2df79..b525f9be9326 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -49,7 +49,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o \
atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
- amdgpu_dma_buf.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
+ amdgpu_dma_buf.o amdgpu_vm.o amdgpu_vm_pt.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \
amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \
@@ -58,7 +58,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o \
- amdgpu_eeprom.o amdgpu_mca.o
+ amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index cdf0818088b3..bffd24845765 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -179,7 +179,7 @@ extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern uint amdgpu_pcie_gen_cap;
extern uint amdgpu_pcie_lane_cap;
-extern uint amdgpu_cg_mask;
+extern u64 amdgpu_cg_mask;
extern uint amdgpu_pg_mask;
extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
@@ -322,7 +322,7 @@ int amdgpu_device_ip_set_powergating_state(void *dev,
enum amd_ip_block_type block_type,
enum amd_powergating_state state);
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
- u32 *flags);
+ u64 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
@@ -860,7 +860,7 @@ struct amdgpu_device {
/* powerplay */
struct amd_powerplay powerplay;
struct amdgpu_pm pm;
- u32 cg_flags;
+ u64 cg_flags;
u32 pg_flags;
/* nbio */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 6ca1db3c243f..64c6664b34e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -724,3 +724,11 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bo
else if (reset)
amdgpu_amdkfd_gpu_reset(adev);
}
+
+bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev)
+{
+ if (adev->gfx.ras && adev->gfx.ras->query_utcl2_poison_status)
+ return adev->gfx.ras->query_utcl2_poison_status(adev);
+ else
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 4cb14c2fe53f..f8b9f27adcf5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -273,9 +273,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
uint64_t *size);
-int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
- struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
- bool *table_freed);
+int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
+ struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
@@ -301,6 +300,7 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
+bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev);
#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 3dc5ab2764ff..80b6b8e432fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1058,8 +1058,7 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
static int update_gpuvm_pte(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
- struct amdgpu_sync *sync,
- bool *table_freed)
+ struct amdgpu_sync *sync)
{
struct amdgpu_bo_va *bo_va = entry->bo_va;
struct amdgpu_device *adev = entry->adev;
@@ -1070,7 +1069,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
return ret;
/* Update the page tables */
- ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
if (ret) {
pr_err("amdgpu_vm_bo_update failed\n");
return ret;
@@ -1082,8 +1081,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
static int map_bo_to_gpuvm(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
struct amdgpu_sync *sync,
- bool no_update_pte,
- bool *table_freed)
+ bool no_update_pte)
{
int ret;
@@ -1100,7 +1098,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
if (no_update_pte)
return 0;
- ret = update_gpuvm_pte(mem, entry, sync, table_freed);
+ ret = update_gpuvm_pte(mem, entry, sync);
if (ret) {
pr_err("update_gpuvm_pte() failed\n");
goto update_gpuvm_pte_failed;
@@ -1710,7 +1708,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem,
- void *drm_priv, bool *table_freed)
+ void *drm_priv)
{
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
int ret;
@@ -1797,7 +1795,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
entry->va, entry->va + bo_size, entry);
ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
- is_invalid_userptr, table_freed);
+ is_invalid_userptr);
if (ret) {
pr_err("Failed to map bo to gpuvm\n");
goto out_unreserve;
@@ -2265,7 +2263,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
continue;
kfd_mem_dmaunmap_attachment(mem, attachment);
- ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
+ ret = update_gpuvm_pte(mem, attachment, &sync);
if (ret) {
pr_err("%s: update PTE failed\n", __func__);
/* make sure this gets validated again */
@@ -2476,7 +2474,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
continue;
kfd_mem_dmaunmap_attachment(mem, attachment);
- ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
+ ret = update_gpuvm_pte(mem, attachment, &sync_obj);
if (ret) {
pr_debug("Memory eviction: update PTE failed. Try again\n");
goto validate_map_fail;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 8de283997769..01853431249d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -128,6 +128,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
goto free_chunk;
}
+ mutex_lock(&p->ctx->lock);
+
/* skip guilty context job */
if (atomic_read(&p->ctx->guilty) == 1) {
ret = -ECANCELED;
@@ -688,6 +690,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
dma_fence_put(parser->fence);
if (parser->ctx) {
+ mutex_unlock(&parser->ctx->lock);
amdgpu_ctx_put(parser->ctx);
}
if (parser->bo_list)
@@ -785,22 +788,22 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
if (r)
return r;
- r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
if (r)
return r;
if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
bo_va = fpriv->csa_va;
BUG_ON(!bo_va);
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
- r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
if (r)
return r;
}
@@ -815,11 +818,11 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (bo_va == NULL)
continue;
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
- r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
if (r)
return r;
}
@@ -832,7 +835,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update);
+ r = amdgpu_sync_fence(&p->job->sync, vm->last_update);
if (r)
return r;
@@ -1136,6 +1139,9 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
{
int i, r;
+ /* TODO: Investigate why we still need the context lock */
+ mutex_unlock(&p->ctx->lock);
+
for (i = 0; i < p->nchunks; ++i) {
struct amdgpu_cs_chunk *chunk;
@@ -1146,32 +1152,34 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
r = amdgpu_cs_process_fence_dep(p, chunk);
if (r)
- return r;
+ goto out;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
if (r)
- return r;
+ goto out;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
if (r)
- return r;
+ goto out;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
if (r)
- return r;
+ goto out;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
if (r)
- return r;
+ goto out;
break;
}
}
-	return 0;
+	r = 0;
+out:
+	mutex_lock(&p->ctx->lock);
+	return r;
}
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
@@ -1332,6 +1340,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out;
r = amdgpu_cs_submit(&parser, cs);
+
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 5981c7d9bd48..8f0e6d93bb9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -237,6 +237,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
kref_init(&ctx->refcount);
spin_lock_init(&ctx->ring_lock);
+ mutex_init(&ctx->lock);
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;
@@ -357,6 +358,7 @@ static void amdgpu_ctx_fini(struct kref *ref)
drm_dev_exit(idx);
}
+ mutex_destroy(&ctx->lock);
kfree(ctx);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index d0cbfcea90f7..142f2f87d44c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -49,6 +49,7 @@ struct amdgpu_ctx {
bool preamble_presented;
int32_t init_priority;
int32_t override_priority;
+ struct mutex lock;
atomic_t guilty;
unsigned long ras_counter_ce;
unsigned long ras_counter_ue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 5d04d24a0d5f..eedb12f6b8a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -38,6 +38,7 @@
#include "amdgpu_umr.h"
#include "amdgpu_reset.h"
+#include "amdgpu_psp_ta.h"
#if defined(CONFIG_DEBUG_FS)
@@ -730,7 +731,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
return -ENOMEM;
/* version, increment each time something is added */
- config[no_regs++] = 4;
+ config[no_regs++] = 5;
config[no_regs++] = adev->gfx.config.max_shader_engines;
config[no_regs++] = adev->gfx.config.max_tile_pipes;
config[no_regs++] = adev->gfx.config.max_cu_per_sh;
@@ -757,8 +758,8 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
/* rev==1 */
config[no_regs++] = adev->rev_id;
- config[no_regs++] = adev->pg_flags;
- config[no_regs++] = adev->cg_flags;
+ config[no_regs++] = lower_32_bits(adev->pg_flags);
+ config[no_regs++] = lower_32_bits(adev->cg_flags);
/* rev==2 */
config[no_regs++] = adev->family;
@@ -773,6 +774,10 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
/* rev==4 APU flag */
config[no_regs++] = adev->flags & AMD_IS_APU ? 1 : 0;
+ /* rev==5 PG/CG flag upper 32bit */
+ config[no_regs++] = upper_32_bits(adev->pg_flags);
+ config[no_regs++] = upper_32_bits(adev->cg_flags);
+
while (size && (*pos < no_regs * 4)) {
uint32_t value;
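A hedged userspace sketch of consuming the new words: a reader of the gca_config blob reassembles the 64-bit flags from the rev==1 (lower) and rev==5 (upper) slots. The slot indices below are hypothetical; the real offsets follow from counting the words emitted before them in the handler above.

	#include <stdint.h>

	/* Hypothetical slot indices for the lower/upper CG flag halves. */
	enum { IDX_CG_LO = 26, IDX_CG_HI = 38 };

	static uint64_t sketch_flags64(const uint32_t *config)
	{
		/* Only meaningful when config[0], the version word, is >= 5. */
		return ((uint64_t)config[IDX_CG_HI] << 32) | config[IDX_CG_LO];
	}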
@@ -1763,6 +1768,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
DRM_ERROR("registering register debugfs failed (%d).\n", r);
amdgpu_debugfs_firmware_init(adev);
+ amdgpu_ta_if_debugfs_init(adev);
#if defined(CONFIG_DRM_AMD_DC)
if (amdgpu_device_has_dc_support(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 49f734137f15..94666c2417c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1703,7 +1703,7 @@ int amdgpu_device_ip_set_powergating_state(void *dev,
* clockgating is enabled.
*/
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
- u32 *flags)
+ u64 *flags)
{
int i;
@@ -3700,7 +3700,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* enable PCIE atomic ops */
if (amdgpu_sriov_vf(adev))
adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
- adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags ==
+ adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
else
adev->have_atomics_support =
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
index 6b25837955c4..1538b2dbfff1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -40,7 +40,7 @@ struct amdgpu_df_funcs {
void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
bool enable);
void (*get_clockgating_state)(struct amdgpu_device *adev,
- u32 *flags);
+ u64 *flags);
void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
bool enable);
int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index e4fcbb385a62..aaf2fc6b1a82 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -430,7 +430,7 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
}
}
next_ip:
- ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
+ ip_offset += struct_size(ip, base_address, ip->num_base_address);
}
}
}
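For context, a hedged illustration of what the struct_size() conversions buy, assuming the ip record was converted to end in a flexible array member elsewhere in this series: the helper computes the same record stride as the open-coded arithmetic, but saturates to SIZE_MAX on arithmetic overflow instead of wrapping.

	#include <linux/overflow.h>
	#include <linux/types.h>

	struct ip_sketch {		/* illustrative, not the real layout */
		u16 hw_id;
		u8 num_base_address;
		u32 base_address[];	/* flexible array member */
	};

	static size_t ip_record_size(const struct ip_sketch *ip)
	{
		/* Equals sizeof(*ip) + ip->num_base_address * sizeof(u32),
		 * with overflow checking. */
		return struct_size(ip, base_address, ip->num_base_address);
	}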
@@ -798,7 +798,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
res = kobject_add(&ip_hw_instance->kobj, NULL,
"%d", ip_hw_instance->num_instance);
next_ip:
- ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
+ ip_offset += struct_size(ip, base_address, ip->num_base_address);
}
}
@@ -1063,7 +1063,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
}
next_ip:
- ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
+ ip_offset += struct_size(ip, base_address, ip->num_base_address);
}
}
@@ -1113,7 +1113,7 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int n
*revision = ip->revision;
return 0;
}
- ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
+ ip_offset += struct_size(ip, base_address, ip->num_base_address);
}
}
@@ -1150,13 +1150,6 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
}
- if ((adev->pdev->device == 0x731E &&
- (adev->pdev->revision == 0xC6 || adev->pdev->revision == 0xC7)) ||
- (adev->pdev->device == 0x7340 && adev->pdev->revision == 0xC9) ||
- (adev->pdev->device == 0x7360 && adev->pdev->revision == 0xC7)) {
- adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
- adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
- }
}
union gc_info {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 7a6908d71820..17c9bbe0cbc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -41,6 +41,11 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>
+static int amdgpu_display_framebuffer_init(struct drm_device *dev,
+ struct amdgpu_framebuffer *rfb,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
static void amdgpu_display_flip_callback(struct dma_fence *f,
struct dma_fence_cb *cb)
{
@@ -113,8 +118,9 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
- amdgpu_crtc->crtc_id, amdgpu_crtc, work);
+ drm_dbg_vbl(adev_to_drm(adev),
+ "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
+ amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}
@@ -1038,35 +1044,11 @@ static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb
return r;
}
-int amdgpu_display_gem_fb_init(struct drm_device *dev,
- struct amdgpu_framebuffer *rfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- int ret;
-
- rfb->base.obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
-
- ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
- if (ret)
- goto err;
-
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
- if (ret)
- goto err;
-
- return 0;
-err:
- drm_dbg_kms(dev, "Failed to init gem fb: %d\n", ret);
- rfb->base.obj[0] = NULL;
- return ret;
-}
-
-int amdgpu_display_gem_fb_verify_and_init(
- struct drm_device *dev, struct amdgpu_framebuffer *rfb,
- struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
+static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
+ struct amdgpu_framebuffer *rfb,
+ struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
{
int ret;
@@ -1098,10 +1080,10 @@ err:
return ret;
}
-int amdgpu_display_framebuffer_init(struct drm_device *dev,
- struct amdgpu_framebuffer *rfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
+static int amdgpu_display_framebuffer_init(struct drm_device *dev,
+ struct amdgpu_framebuffer *rfb,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
{
struct amdgpu_device *adev = drm_to_adev(dev);
int ret, i;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b03663f42cc9..ebd37fb19cdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -136,7 +136,7 @@ int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
uint amdgpu_pcie_gen_cap;
uint amdgpu_pcie_lane_cap;
-uint amdgpu_cg_mask = 0xffffffff;
+u64 amdgpu_cg_mask = 0xffffffffffffffff;
uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
@@ -454,12 +454,12 @@ MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
/**
- * DOC: cg_mask (uint)
+ * DOC: cg_mask (ullong)
* Override Clockgating features enabled on GPU (0 = disable clock gating). See the AMD_CG_SUPPORT flags in
- * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
+ * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffffffffffff (all enabled).
*/
MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
-module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
+module_param_named(cg_mask, amdgpu_cg_mask, ullong, 0444);
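A hedged sketch of how the widened mask is typically consumed; the AMD_CG_SUPPORT_* names come from amd_shared.h, while the masking pattern below is the conventional one rather than a verbatim copy from this patch:

	static void sketch_apply_cg_mask(struct amdgpu_device *adev)
	{
		/* Both sides are u64 now, so clockgating features whose
		 * AMD_CG_SUPPORT_* bit sits above bit 31 survive the mask. */
		adev->cg_flags &= amdgpu_cg_mask;

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
			/* enable GFX medium grain clockgating */
		}
	}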
/**
* DOC: pg_mask (uint)
@@ -2323,18 +2323,23 @@ static int amdgpu_pmops_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
- int r;
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else
adev->in_s3 = true;
- r = amdgpu_device_suspend(drm_dev, true);
- if (r)
- return r;
+ return amdgpu_device_suspend(drm_dev, true);
+}
+
+static int amdgpu_pmops_suspend_noirq(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
if (!adev->in_s0ix)
- r = amdgpu_asic_reset(adev);
- return r;
+ return amdgpu_asic_reset(adev);
+
+ return 0;
}
static int amdgpu_pmops_resume(struct device *dev)
@@ -2390,6 +2395,71 @@ static int amdgpu_pmops_restore(struct device *dev)
return amdgpu_device_resume(drm_dev, true);
}
+static int amdgpu_runtime_idle_check_display(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ if (adev->mode_info.num_crtc) {
+ struct drm_connector *list_connector;
+ struct drm_connector_list_iter iter;
+ int ret = 0;
+
+ /* XXX: Return busy if any displays are connected to avoid
+ * possible display wakeups after runtime resume due to
+ * hotplug events in case any displays were connected while
+ * the GPU was in suspend. Remove this once that is fixed.
+ */
+ mutex_lock(&drm_dev->mode_config.mutex);
+ drm_connector_list_iter_begin(drm_dev, &iter);
+ drm_for_each_connector_iter(list_connector, &iter) {
+ if (list_connector->status == connector_status_connected) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ mutex_unlock(&drm_dev->mode_config.mutex);
+
+ if (ret)
+ return ret;
+
+ if (amdgpu_device_has_dc_support(adev)) {
+ struct drm_crtc *crtc;
+
+ drm_for_each_crtc(crtc, drm_dev) {
+ drm_modeset_lock(&crtc->mutex, NULL);
+ if (crtc->state->active)
+ ret = -EBUSY;
+ drm_modeset_unlock(&crtc->mutex);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ mutex_lock(&drm_dev->mode_config.mutex);
+ drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
+
+ drm_connector_list_iter_begin(drm_dev, &iter);
+ drm_for_each_connector_iter(list_connector, &iter) {
+ if (list_connector->dpms == DRM_MODE_DPMS_ON) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+
+ drm_connector_list_iter_end(&iter);
+
+ drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
+ mutex_unlock(&drm_dev->mode_config.mutex);
+ }
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -2402,6 +2472,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
return -EBUSY;
}
+ ret = amdgpu_runtime_idle_check_display(dev);
+ if (ret)
+ return ret;
+
/* wait for all rings to drain before suspending */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -2511,41 +2585,7 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
return -EBUSY;
}
- if (amdgpu_device_has_dc_support(adev)) {
- struct drm_crtc *crtc;
-
- drm_for_each_crtc(crtc, drm_dev) {
- drm_modeset_lock(&crtc->mutex, NULL);
- if (crtc->state->active)
- ret = -EBUSY;
- drm_modeset_unlock(&crtc->mutex);
- if (ret < 0)
- break;
- }
-
- } else {
- struct drm_connector *list_connector;
- struct drm_connector_list_iter iter;
-
- mutex_lock(&drm_dev->mode_config.mutex);
- drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
-
- drm_connector_list_iter_begin(drm_dev, &iter);
- drm_for_each_connector_iter(list_connector, &iter) {
- if (list_connector->dpms == DRM_MODE_DPMS_ON) {
- ret = -EBUSY;
- break;
- }
- }
-
- drm_connector_list_iter_end(&iter);
-
- drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
- mutex_unlock(&drm_dev->mode_config.mutex);
- }
-
- if (ret == -EBUSY)
- DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+ ret = amdgpu_runtime_idle_check_display(dev);
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
@@ -2575,6 +2615,7 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
.prepare = amdgpu_pmops_prepare,
.complete = amdgpu_pmops_complete,
.suspend = amdgpu_pmops_suspend,
+ .suspend_noirq = amdgpu_pmops_suspend_noirq,
.resume = amdgpu_pmops_resume,
.freeze = amdgpu_pmops_freeze,
.thaw = amdgpu_pmops_thaw,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 84a53758e18e..652571267077 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -613,7 +613,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (operation == AMDGPU_VA_OP_MAP ||
operation == AMDGPU_VA_OP_REPLACE) {
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
goto error;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index dcb3c7871c73..5ed9b8a4c571 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -202,6 +202,7 @@ struct amdgpu_cu_info {
struct amdgpu_gfx_ras {
struct amdgpu_ras_block_object ras_block;
void (*enable_watchdog_timer)(struct amdgpu_device *adev);
+ bool (*query_utcl2_poison_status)(struct amdgpu_device *adev);
};
struct amdgpu_gfx_funcs {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index a66a0881a934..88b852b3a2cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -25,6 +25,9 @@
*/
#include <linux/io-64-nonatomic-lo-hi.h>
+#ifdef CONFIG_X86
+#include <asm/hypervisor.h>
+#endif
#include "amdgpu.h"
#include "amdgpu_gmc.h"
@@ -647,12 +650,14 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
case CHIP_VEGA10:
adev->mman.keep_stolen_vga_memory = true;
/*
- * VEGA10 SRIOV VF needs some firmware reserved area.
+ * VEGA10 SRIOV VF with MS_HYPERV host needs some firmware reserved area.
*/
- if (amdgpu_sriov_vf(adev)) {
- adev->mman.stolen_reserved_offset = 0x100000;
- adev->mman.stolen_reserved_size = 0x600000;
+#ifdef CONFIG_X86
+ if (amdgpu_sriov_vf(adev) && hypervisor_is_type(X86_HYPER_MS_HYPERV)) {
+ adev->mman.stolen_reserved_offset = 0x500000;
+ adev->mman.stolen_reserved_size = 0x200000;
}
+#endif
break;
case CHIP_RAVEN:
case CHIP_RENOIR:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
index 9181c7bef7c6..ac5c61d3de2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -33,7 +33,7 @@ struct amdgpu_hdp_funcs {
void (*invalidate_hdp)(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
void (*update_clock_gating)(struct amdgpu_device *adev, bool enable);
- void (*get_clock_gating_state)(struct amdgpu_device *adev, u32 *flags);
+ void (*get_clock_gating_state)(struct amdgpu_device *adev, u64 *flags);
void (*init_registers)(struct amdgpu_device *adev);
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 4ba4b54092f1..03d115d2b5ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -260,19 +260,15 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
uint64_t fence_context = adev->fence_context + ring->idx;
- struct dma_fence *updates = sync->last_vm_update;
bool needs_flush = vm->use_cpu_for_update;
- int r = 0;
+ uint64_t updates = amdgpu_vm_tlb_seq(vm);
+ int r;
*id = vm->reserved_vmid[vmhub];
- if (updates && (*id)->flushed_updates &&
- updates->context == (*id)->flushed_updates->context &&
- !dma_fence_is_later(updates, (*id)->flushed_updates))
- updates = NULL;
-
if ((*id)->owner != vm->immediate.fence_context ||
- job->vm_pd_addr != (*id)->pd_gpu_addr ||
- updates || !(*id)->last_flush ||
+ (*id)->pd_gpu_addr != job->vm_pd_addr ||
+ (*id)->flushed_updates < updates ||
+ !(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
!dma_fence_is_signaled((*id)->last_flush))) {
struct dma_fence *tmp;
@@ -286,8 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
if (tmp) {
*id = NULL;
- r = amdgpu_sync_fence(sync, tmp);
- return r;
+ return amdgpu_sync_fence(sync, tmp);
}
needs_flush = true;
}
@@ -299,10 +294,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
if (r)
return r;
- if (updates) {
- dma_fence_put((*id)->flushed_updates);
- (*id)->flushed_updates = dma_fence_get(updates);
- }
+ (*id)->flushed_updates = updates;
job->vm_needs_flush = needs_flush;
return 0;
}
@@ -330,7 +322,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
unsigned vmhub = ring->funcs->vmhub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx;
- struct dma_fence *updates = sync->last_vm_update;
+ uint64_t updates = amdgpu_vm_tlb_seq(vm);
int r;
job->vm_needs_flush = vm->use_cpu_for_update;
@@ -338,7 +330,6 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
/* Check if we can use a VMID already assigned to this VM */
list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
bool needs_flush = vm->use_cpu_for_update;
- struct dma_fence *flushed;
/* Check all the prerequisites to using this VMID */
if ((*id)->owner != vm->immediate.fence_context)
@@ -352,8 +343,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
!dma_fence_is_signaled((*id)->last_flush)))
needs_flush = true;
- flushed = (*id)->flushed_updates;
- if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
+ if ((*id)->flushed_updates < updates)
needs_flush = true;
if (needs_flush && !adev->vm_manager.concurrent_flush)
@@ -366,11 +356,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
if (r)
return r;
- if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
- dma_fence_put((*id)->flushed_updates);
- (*id)->flushed_updates = dma_fence_get(updates);
- }
-
+ (*id)->flushed_updates = updates;
job->vm_needs_flush |= needs_flush;
return 0;
}
@@ -416,8 +402,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
goto error;
if (!id) {
- struct dma_fence *updates = sync->last_vm_update;
-
/* Still no ID to use? Then use the idle one found earlier */
id = idle;
@@ -426,8 +410,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
- dma_fence_put(id->flushed_updates);
- id->flushed_updates = dma_fence_get(updates);
+ id->flushed_updates = amdgpu_vm_tlb_seq(vm);
job->vm_needs_flush = true;
}
@@ -594,7 +577,6 @@ void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
struct amdgpu_vmid *id = &id_mgr->ids[j];
amdgpu_sync_free(&id->active);
- dma_fence_put(id->flushed_updates);
dma_fence_put(id->last_flush);
dma_fence_put(id->pasid_mapping);
}
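The conversions above replace fence-based tracking with plain sequence numbers; a hedged restatement of the new flush test, assuming amdgpu_vm_tlb_seq() returns the VM's current, monotonically increasing TLB flush sequence:

	/* A VMID needs a flush when the updates it last flushed lag what
	 * the VM has produced since; a u64 sequence needs no refcounting,
	 * unlike the dma_fence it replaces. */
	static bool sketch_vmid_needs_flush(const struct amdgpu_vmid *id,
					    struct amdgpu_vm *vm)
	{
		return id->flushed_updates < amdgpu_vm_tlb_seq(vm);
	}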
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 0c3b4fa1f936..06c8a0034fa5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -47,7 +47,7 @@ struct amdgpu_vmid {
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
- struct dma_fence *flushed_updates;
+ uint64_t flushed_updates;
uint32_t current_gpu_reset_count;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index 55fbff2be761..b6c7fb00e05a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -24,6 +24,8 @@
#ifndef __AMDGPU_JPEG_H__
#define __AMDGPU_JPEG_H__
+#include "amdgpu_ras.h"
+
#define AMDGPU_MAX_JPEG_INSTANCES 2
#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
@@ -39,6 +41,10 @@ struct amdgpu_jpeg_inst {
struct amdgpu_jpeg_reg external;
};
+struct amdgpu_jpeg_ras {
+ struct amdgpu_ras_block_object ras_block;
+};
+
struct amdgpu_jpeg {
uint8_t num_jpeg_inst;
struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
@@ -48,6 +54,8 @@ struct amdgpu_jpeg {
enum amd_powergating_state cur_state;
struct mutex jpeg_pg_lock;
atomic_t total_submission_cnt;
+ struct ras_common_if *ras_if;
+ struct amdgpu_jpeg_ras *ras;
};
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 9f1540f0ebf9..f939395c5914 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -34,7 +34,7 @@ struct amdgpu_mmhub_funcs {
void (*gart_disable)(struct amdgpu_device *adev);
int (*set_clockgating)(struct amdgpu_device *adev,
enum amd_clockgating_state state);
- void (*get_clockgating)(struct amdgpu_device *adev, u32 *flags);
+ void (*get_clockgating)(struct amdgpu_device *adev, u64 *flags);
void (*setup_vm_pt_regs)(struct amdgpu_device *adev, uint32_t vmid,
uint64_t page_table_base);
void (*update_power_gating)(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 72c6f6cb7a44..f80b4838cea1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -592,19 +592,6 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
int *hpos, ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
-int amdgpu_display_gem_fb_init(struct drm_device *dev,
- struct amdgpu_framebuffer *rfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-int amdgpu_display_gem_fb_verify_and_init(
- struct drm_device *dev, struct amdgpu_framebuffer *rfb,
- struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-int amdgpu_display_framebuffer_init(struct drm_device *dev,
- struct amdgpu_framebuffer *rfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-
int amdgpufb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
void amdgpu_enc_destroy(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 3d13e601fc35..03439083182a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -83,7 +83,7 @@ struct amdgpu_nbio_funcs {
void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
bool enable);
void (*get_clockgating_state)(struct amdgpu_device *adev,
- u32 *flags);
+ u64 *flags);
void (*ih_control)(struct amdgpu_device *adev);
void (*init_registers)(struct amdgpu_device *adev);
void (*remap_hdp_registers)(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e92ecabfa7bd..5444515c1476 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -472,7 +472,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
fail:
DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
- man->size << PAGE_SHIFT);
+ man->size);
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index a6acec1a6155..f6527aa19238 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -46,8 +46,6 @@ static int psp_sysfs_init(struct amdgpu_device *adev);
static void psp_sysfs_fini(struct amdgpu_device *adev);
static int psp_load_smu_fw(struct psp_context *psp);
-static int psp_ta_unload(struct psp_context *psp, struct ta_context *context);
-static int psp_ta_load(struct psp_context *psp, struct ta_context *context);
static int psp_rap_terminate(struct psp_context *psp);
static int psp_securedisplay_terminate(struct psp_context *psp);
@@ -862,7 +860,7 @@ static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
cmd->cmd.cmd_unload_ta.session_id = session_id;
}
-static int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
+int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
{
int ret;
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
@@ -944,7 +942,7 @@ static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
}
-static int psp_ta_init_shared_buf(struct psp_context *psp,
+int psp_ta_init_shared_buf(struct psp_context *psp,
struct ta_mem_context *mem_ctx)
{
/*
@@ -958,7 +956,7 @@ static int psp_ta_init_shared_buf(struct psp_context *psp,
&mem_ctx->shared_buf);
}
-static void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
+void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
{
amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
&mem_ctx->shared_buf);
@@ -969,6 +967,42 @@ static int psp_xgmi_init_shared_buf(struct psp_context *psp)
return psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
}
+static void psp_prep_ta_invoke_indirect_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ struct ta_context *context)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = context->session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+
+ cmd->cmd.cmd_invoke_cmd.buf.num_desc = 1;
+ cmd->cmd.cmd_invoke_cmd.buf.total_size = context->mem_context.shared_mem_size;
+ cmd->cmd.cmd_invoke_cmd.buf.buf_desc[0].buf_size = context->mem_context.shared_mem_size;
+ cmd->cmd.cmd_invoke_cmd.buf.buf_desc[0].buf_phy_addr_lo =
+ lower_32_bits(context->mem_context.shared_mc_addr);
+ cmd->cmd.cmd_invoke_cmd.buf.buf_desc[0].buf_phy_addr_hi =
+ upper_32_bits(context->mem_context.shared_mc_addr);
+}
+
+int psp_ta_invoke_indirect(struct psp_context *psp,
+ uint32_t ta_cmd_id,
+ struct ta_context *context)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
+
+ psp_prep_ta_invoke_indirect_cmd_buf(cmd, ta_cmd_id, context);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ context->resp_status = cmd->resp.status;
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
+
static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
uint32_t ta_cmd_id,
uint32_t session_id)
@@ -978,7 +1012,7 @@ static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
}
-static int psp_ta_invoke(struct psp_context *psp,
+int psp_ta_invoke(struct psp_context *psp,
uint32_t ta_cmd_id,
struct ta_context *context)
{
@@ -990,12 +1024,14 @@ static int psp_ta_invoke(struct psp_context *psp,
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
+ context->resp_status = cmd->resp.status;
+
release_psp_cmd_buf(psp);
return ret;
}
-static int psp_ta_load(struct psp_context *psp, struct ta_context *context)
+int psp_ta_load(struct psp_context *psp, struct ta_context *context)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
@@ -1010,6 +1046,8 @@ static int psp_ta_load(struct psp_context *psp, struct ta_context *context)
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
+ context->resp_status = cmd->resp.status;
+
if (!ret) {
context->session_id = cmd->resp.session_id;
}
@@ -1415,7 +1453,7 @@ int psp_ras_enable_features(struct psp_context *psp,
return 0;
}
-static int psp_ras_terminate(struct psp_context *psp)
+int psp_ras_terminate(struct psp_context *psp)
{
int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index ff7d533eb746..cf8d3199b35b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -48,6 +48,17 @@ enum psp_shared_mem_size {
PSP_SECUREDISPLAY_SHARED_MEM_SIZE = 0x4000,
};
+enum ta_type_id {
+ TA_TYPE_XGMI = 1,
+ TA_TYPE_RAS,
+ TA_TYPE_HDCP,
+ TA_TYPE_DTM,
+ TA_TYPE_RAP,
+ TA_TYPE_SECUREDISPLAY,
+
+ TA_TYPE_MAX_INDEX,
+};
+
struct psp_context;
struct psp_xgmi_node_info;
struct psp_xgmi_topology_info;
@@ -151,9 +162,11 @@ struct ta_mem_context {
struct ta_context {
bool initialized;
uint32_t session_id;
+ uint32_t resp_status;
struct ta_mem_context mem_context;
struct psp_bin_desc bin_desc;
enum psp_gfx_cmd_id ta_load_type;
+ enum ta_type_id ta_type;
};
struct ta_cp_context {
@@ -407,6 +420,18 @@ int psp_gpu_reset(struct amdgpu_device *adev);
int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
uint64_t cmd_gpu_addr, int cmd_size);
+int psp_ta_init_shared_buf(struct psp_context *psp,
+ struct ta_mem_context *mem_ctx);
+void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx);
+int psp_ta_unload(struct psp_context *psp, struct ta_context *context);
+int psp_ta_load(struct psp_context *psp, struct ta_context *context);
+int psp_ta_invoke(struct psp_context *psp,
+ uint32_t ta_cmd_id,
+ struct ta_context *context);
+int psp_ta_invoke_indirect(struct psp_context *psp,
+ uint32_t ta_cmd_id,
+ struct ta_context *context);
+
int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta);
int psp_xgmi_terminate(struct psp_context *psp);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
@@ -425,6 +450,7 @@ int psp_ras_enable_features(struct psp_context *psp,
union ta_ras_cmd_input *info, bool enable);
int psp_ras_trigger_error(struct psp_context *psp,
struct ta_ras_trigger_error_input *info);
+int psp_ras_terminate(struct psp_context *psp);
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
new file mode 100644
index 000000000000..247a476e6354
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_psp_ta.h"
+
+static const char *TA_IF_FS_NAME = "ta_if";
+
+static struct dentry *dir;
+static struct dentry *ta_load_debugfs_dentry;
+static struct dentry *ta_unload_debugfs_dentry;
+static struct dentry *ta_invoke_debugfs_dentry;
+
+static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf,
+ size_t len, loff_t *off);
+static ssize_t ta_if_unload_debugfs_write(struct file *fp, const char *buf,
+ size_t len, loff_t *off);
+static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf,
+ size_t len, loff_t *off);
+
+
+static uint32_t get_bin_version(const uint8_t *bin)
+{
+ const struct common_firmware_header *hdr =
+ (const struct common_firmware_header *)bin;
+
+ return hdr->ucode_version;
+}
+
+static void prep_ta_mem_context(struct psp_context *psp,
+ struct ta_context *context,
+ uint8_t *shared_buf,
+ uint32_t shared_buf_len)
+{
+ context->mem_context.shared_mem_size = PAGE_ALIGN(shared_buf_len);
+ psp_ta_init_shared_buf(psp, &context->mem_context);
+
+ memcpy((void *)context->mem_context.shared_buf, shared_buf, shared_buf_len);
+}
+
+static bool is_ta_type_valid(enum ta_type_id ta_type)
+{
+ bool ret = false;
+
+ switch (ta_type) {
+ case TA_TYPE_RAS:
+ ret = true;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static const struct file_operations ta_load_debugfs_fops = {
+ .write = ta_if_load_debugfs_write,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE
+};
+
+static const struct file_operations ta_unload_debugfs_fops = {
+ .write = ta_if_unload_debugfs_write,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE
+};
+
+static const struct file_operations ta_invoke_debugfs_fops = {
+ .write = ta_if_invoke_debugfs_write,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE
+};
+
+
+/**
+ * DOC: AMDGPU TA debugfs interfaces
+ *
+ * Three debugfs interfaces can be opened by a program to
+ * load/invoke/unload TA,
+ *
+ * - /sys/kernel/debug/dri/<N>/ta_if/ta_load
+ * - /sys/kernel/debug/dri/<N>/ta_if/ta_invoke
+ * - /sys/kernel/debug/dri/<N>/ta_if/ta_unload
+ *
+ * How to use the interfaces in a program:
+ *
+ * A program needs to provide a transmit buffer to the interfaces,
+ * and receives a result buffer back from them, as described below:
+ *
+ * - For TA load debugfs interface:
+ * Transmit buffer:
+ * - TA type (4bytes)
+ * - TA bin length (4bytes)
+ * - TA bin
+ * Receive buffer:
+ * - TA ID (4bytes)
+ *
+ * - For TA invoke debugfs interface:
+ * Transmit buffer:
+ * - TA ID (4bytes)
+ * - TA CMD ID (4bytes)
+ * - TA shared buf length (4bytes)
+ * - TA shared buf
+ * Receive buffer:
+ * - TA shared buf
+ *
+ * - For TA unload debugfs interface:
+ * Transmit buffer:
+ * - TA ID (4bytes)
+ */
+
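A hedged userspace sketch of the ta_invoke protocol documented above; the DRI minor number is illustrative, and per the handler further down, the kernel copies the resulting shared buf back to the start of the buffer handed to write():

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	#define TA_INVOKE_PATH "/sys/kernel/debug/dri/0/ta_if/ta_invoke"

	static int sketch_ta_invoke(uint32_t ta_id, uint32_t cmd_id,
				    void *shared, uint32_t shared_len)
	{
		uint8_t txbuf[12 + 4096];
		ssize_t ret;
		int fd;

		if (shared_len > sizeof(txbuf) - 12)
			return -1;
		memcpy(&txbuf[0], &ta_id, 4);		/* TA ID */
		memcpy(&txbuf[4], &cmd_id, 4);		/* TA CMD ID */
		memcpy(&txbuf[8], &shared_len, 4);	/* TA shared buf length */
		memcpy(&txbuf[12], shared, shared_len);	/* TA shared buf */

		fd = open(TA_INVOKE_PATH, O_WRONLY);
		if (fd < 0)
			return -1;
		ret = write(fd, txbuf, 12 + shared_len);
		close(fd);
		if (ret < 0)
			return -1;
		memcpy(shared, txbuf, shared_len);	/* receive buffer */
		return 0;
	}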
+static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t len, loff_t *off)
+{
+ uint32_t ta_type = 0;
+ uint32_t ta_bin_len = 0;
+ uint8_t *ta_bin = NULL;
+ uint32_t copy_pos = 0;
+ int ret = 0;
+
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(fp)->i_private;
+ struct psp_context *psp = &adev->psp;
+ struct ta_context context = {0};
+
+ if (!buf)
+ return -EINVAL;
+
+ ret = copy_from_user((void *)&ta_type, &buf[copy_pos], sizeof(uint32_t));
+ if (ret || (!is_ta_type_valid(ta_type)))
+ return -EINVAL;
+
+ copy_pos += sizeof(uint32_t);
+
+ ret = copy_from_user((void *)&ta_bin_len, &buf[copy_pos], sizeof(uint32_t));
+ if (ret)
+ return -EINVAL;
+
+ copy_pos += sizeof(uint32_t);
+
+ ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);
+	if (!ta_bin)
+		return -ENOMEM;	/* don't fall through with a NULL buffer */
+ ret = copy_from_user((void *)ta_bin, &buf[copy_pos], ta_bin_len);
+ if (ret)
+ goto err_free_bin;
+
+ ret = psp_ras_terminate(psp);
+ if (ret) {
+ dev_err(adev->dev, "Failed to unload embedded RAS TA\n");
+ goto err_free_bin;
+ }
+
+ context.ta_type = ta_type;
+ context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+ context.bin_desc.fw_version = get_bin_version(ta_bin);
+ context.bin_desc.size_bytes = ta_bin_len;
+ context.bin_desc.start_addr = ta_bin;
+
+ ret = psp_ta_load(psp, &context);
+
+ if (ret || context.resp_status) {
+ dev_err(adev->dev, "TA load via debugfs failed (%d) status %d\n",
+ ret, context.resp_status);
+ goto err_free_bin;
+ }
+
+ context.initialized = true;
+ ret = copy_to_user((char *)buf, (void *)&context.session_id, sizeof(uint32_t));
+
+err_free_bin:
+ kfree(ta_bin);
+
+ return ret;
+}
+
+static ssize_t ta_if_unload_debugfs_write(struct file *fp, const char *buf, size_t len, loff_t *off)
+{
+ uint32_t ta_id = 0;
+ int ret = 0;
+
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(fp)->i_private;
+ struct psp_context *psp = &adev->psp;
+ struct ta_context context = {0};
+
+ if (!buf)
+ return -EINVAL;
+
+ ret = copy_from_user((void *)&ta_id, buf, sizeof(uint32_t));
+ if (ret)
+ return -EINVAL;
+
+ context.session_id = ta_id;
+
+ ret = psp_ta_unload(psp, &context);
+ if (!ret)
+ context.initialized = false;
+
+ return ret;
+}
+
+static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size_t len, loff_t *off)
+{
+ uint32_t ta_id = 0;
+ uint32_t cmd_id = 0;
+ uint32_t shared_buf_len = 0;
+ uint8_t *shared_buf = NULL;
+ uint32_t copy_pos = 0;
+ int ret = 0;
+
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(fp)->i_private;
+ struct psp_context *psp = &adev->psp;
+ struct ta_context context = {0};
+
+ if (!buf)
+ return -EINVAL;
+
+ ret = copy_from_user((void *)&ta_id, &buf[copy_pos], sizeof(uint32_t));
+ if (ret)
+ return -EINVAL;
+ copy_pos += sizeof(uint32_t);
+
+ ret = copy_from_user((void *)&cmd_id, &buf[copy_pos], sizeof(uint32_t));
+ if (ret)
+ return -EINVAL;
+ copy_pos += sizeof(uint32_t);
+
+ ret = copy_from_user((void *)&shared_buf_len, &buf[copy_pos], sizeof(uint32_t));
+ if (ret)
+ return -EINVAL;
+ copy_pos += sizeof(uint32_t);
+
+ shared_buf = kzalloc(shared_buf_len, GFP_KERNEL);
+	if (!shared_buf)
+		return -ENOMEM;	/* don't fall through with a NULL buffer */
+ ret = copy_from_user((void *)shared_buf, &buf[copy_pos], shared_buf_len);
+ if (ret)
+ goto err_free_shared_buf;
+
+ context.session_id = ta_id;
+
+ prep_ta_mem_context(psp, &context, shared_buf, shared_buf_len);
+
+ ret = psp_ta_invoke_indirect(psp, cmd_id, &context);
+
+ if (ret || context.resp_status) {
+ dev_err(adev->dev, "TA invoke via debugfs failed (%d) status %d\n",
+ ret, context.resp_status);
+ goto err_free_ta_shared_buf;
+ }
+
+ ret = copy_to_user((char *)buf, context.mem_context.shared_buf, shared_buf_len);
+
+err_free_ta_shared_buf:
+ psp_ta_free_shared_buf(&context.mem_context);
+
+err_free_shared_buf:
+ kfree(shared_buf);
+
+ return ret;
+}
+
+static struct dentry *amdgpu_ta_if_debugfs_create(struct amdgpu_device *adev)
+{
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+
+ dir = debugfs_create_dir(TA_IF_FS_NAME, minor->debugfs_root);
+
+ ta_load_debugfs_dentry = debugfs_create_file("ta_load", 0200, dir, adev,
+ &ta_load_debugfs_fops);
+
+ ta_unload_debugfs_dentry = debugfs_create_file("ta_unload", 0200, dir,
+ adev, &ta_unload_debugfs_fops);
+
+ ta_invoke_debugfs_dentry = debugfs_create_file("ta_invoke", 0200, dir,
+ adev, &ta_invoke_debugfs_fops);
+ return dir;
+}
+
+void amdgpu_ta_if_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ dir = amdgpu_ta_if_debugfs_create(adev);
+#endif
+}
+
+void amdgpu_ta_if_debugfs_remove(void)
+{
+ debugfs_remove_recursive(dir);
+}
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.h
index f437d36dd98c..883f89d57616 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.h
@@ -1,6 +1,5 @@
/*
- * Copyright 2009 Advanced Micro Devices, Inc.
- * Copyright 2009 Red Hat Inc.
+ * Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -9,30 +8,23 @@
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#ifndef R600_BLIT_SHADERS_H
-#define R600_BLIT_SHADERS_H
+#ifndef __AMDGPU_PSP_TA_H__
+#define __AMDGPU_PSP_TA_H__
-extern const u32 r6xx_ps[];
-extern const u32 r6xx_vs[];
-extern const u32 r7xx_default_state[];
-extern const u32 r6xx_default_state[];
-
-
-extern const u32 r6xx_ps_size, r6xx_vs_size;
-extern const u32 r6xx_default_size, r7xx_default_size;
+void amdgpu_ta_if_debugfs_init(struct amdgpu_device *adev);
+void amdgpu_ta_if_debugfs_remove(void);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 424c22a841f4..ec709997c9c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -66,6 +66,8 @@ const char *ras_block_string[] = {
"mp1",
"fuse",
"mca",
+ "vcn",
+ "jpeg",
};
const char *ras_mca_block_string[] = {
@@ -2205,6 +2207,13 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
dev_info(adev->dev, "SRAM ECC is active.\n");
adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
1 << AMDGPU_RAS_BLOCK__DF);
+
+ if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0))
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
+ else
+ adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
} else {
dev_info(adev->dev, "SRAM ECC is not presented.\n");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 9314fde81e68..606df8869b89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -49,6 +49,8 @@ enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__MP1,
AMDGPU_RAS_BLOCK__FUSE,
AMDGPU_RAS_BLOCK__MCA,
+ AMDGPU_RAS_BLOCK__VCN,
+ AMDGPU_RAS_BLOCK__JPEG,
AMDGPU_RAS_BLOCK__LAST
};
@@ -506,6 +508,7 @@ struct amdgpu_ras_block_hw_ops {
void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status);
void (*reset_ras_error_count)(struct amdgpu_device *adev);
void (*reset_ras_error_status)(struct amdgpu_device *adev);
+ bool (*query_poison_status)(struct amdgpu_device *adev);
};
/* work flow
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
index 484bb3dcec47..c7a823f3f2c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
@@ -27,7 +27,7 @@ struct amdgpu_smuio_funcs {
u32 (*get_rom_index_offset)(struct amdgpu_device *adev);
u32 (*get_rom_data_offset)(struct amdgpu_device *adev);
void (*update_rom_clock_gating)(struct amdgpu_device *adev, bool enable);
- void (*get_clock_gating_state)(struct amdgpu_device *adev, u32 *flags);
+ void (*get_clock_gating_state)(struct amdgpu_device *adev, u64 *flags);
u32 (*get_die_id)(struct amdgpu_device *adev);
u32 (*get_socket_id)(struct amdgpu_device *adev);
bool (*is_host_gpu_xgmi_supported)(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 11c46b3e4c60..504af1b93bfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -51,7 +51,6 @@ static struct kmem_cache *amdgpu_sync_slab;
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
hash_init(sync->fences);
- sync->last_vm_update = NULL;
}
/**
@@ -171,23 +170,6 @@ int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
return 0;
}
-/**
- * amdgpu_sync_vm_fence - remember to sync to this VM fence
- *
- * @sync: sync object to add fence to
- * @fence: the VM fence to add
- *
- * Add the fence to the sync object and remember it as VM update.
- */
-int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
-{
- if (!fence)
- return 0;
-
- amdgpu_sync_keep_later(&sync->last_vm_update, fence);
- return amdgpu_sync_fence(sync, fence);
-}
-
/* Determine based on the owner and mode if we should sync to a fence or not */
static bool amdgpu_sync_test_fence(struct amdgpu_device *adev,
enum amdgpu_sync_mode mode,
@@ -377,9 +359,6 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
}
}
- dma_fence_put(clone->last_vm_update);
- clone->last_vm_update = dma_fence_get(source->last_vm_update);
-
return 0;
}
@@ -420,8 +399,6 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
dma_fence_put(e->fence);
kmem_cache_free(amdgpu_sync_slab, e);
}
-
- dma_fence_put(sync->last_vm_update);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index 7c0fe20c470d..2d5c613cda10 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -43,12 +43,10 @@ enum amdgpu_sync_mode {
*/
struct amdgpu_sync {
DECLARE_HASHTABLE(fences, 4);
- struct dma_fence *last_vm_update;
};
void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f);
-int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence);
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_resv *resv, enum amdgpu_sync_mode mode,
void *owner);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f99093f2ebc7..a0ee828a4a97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -52,7 +52,7 @@
#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin"
-#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2_vcn.bin"
+#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index f06fb7f882e2..fb39065a96bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -24,6 +24,8 @@
#ifndef __AMDGPU_VCN_H__
#define __AMDGPU_VCN_H__
+#include "amdgpu_ras.h"
+
#define AMDGPU_VCN_STACK_SIZE (128*1024)
#define AMDGPU_VCN_CONTEXT_SIZE (512*1024)
@@ -233,6 +235,10 @@ struct amdgpu_vcn_inst {
struct amdgpu_vcn_fw_shared fw_shared;
};
+struct amdgpu_vcn_ras {
+ struct amdgpu_ras_block_object ras_block;
+};
+
struct amdgpu_vcn {
unsigned fw_version;
struct delayed_work idle_work;
@@ -252,6 +258,9 @@ struct amdgpu_vcn {
unsigned harvest_config;
int (*pause_dpg_mode)(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
+
+ struct ras_common_if *ras_if;
+ struct amdgpu_vcn_ras *ras;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index a025f080aa6a..ea92edcc0432 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -23,6 +23,10 @@
#include <linux/module.h>
+#ifdef CONFIG_X86
+#include <asm/hypervisor.h>
+#endif
+
#include <drm/drm_drv.h>
#include "amdgpu.h"
@@ -723,8 +727,12 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
break;
case CHIP_VEGA10:
soc15_set_virt_ops(adev);
- /* send a dummy GPU_INIT_DATA request to host on vega10 */
- amdgpu_virt_request_init_data(adev);
+#ifdef CONFIG_X86
+ /* do not send GPU_INIT_DATA with MS_HYPERV */
+ if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
+#endif
+ /* send a dummy GPU_INIT_DATA request to host on vega10 */
+ amdgpu_virt_request_init_data(adev);
break;
case CHIP_VEGA20:
case CHIP_ARCTURUS:
@@ -862,11 +870,11 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
uint32_t timeout = 50000;
uint32_t i, tmp;
uint32_t ret = 0;
- static void *scratch_reg0;
- static void *scratch_reg1;
- static void *scratch_reg2;
- static void *scratch_reg3;
- static void *spare_int;
+ void *scratch_reg0;
+ void *scratch_reg1;
+ void *scratch_reg2;
+ void *scratch_reg3;
+ void *spare_int;
if (!adev->gfx.rlc.rlcg_reg_access_supported) {
dev_err(adev->dev,
@@ -919,7 +927,7 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
"wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
} else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
dev_err(adev->dev,
- "regiser is not in range, rlcg failed to program reg: 0x%05x\n", offset);
+ "register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
} else {
dev_err(adev->dev,
"unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5277c10d901d..f9479e23de18 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -89,6 +89,21 @@ struct amdgpu_prt_cb {
};
/**
+ * struct amdgpu_vm_tlb_seq_cb - Helper to increment the TLB flush sequence
+ */
+struct amdgpu_vm_tlb_seq_cb {
+ /**
+ * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
+ */
+ struct amdgpu_vm *vm;
+
+ /**
+ * @cb: callback
+ */
+ struct dma_fence_cb cb;
+};
+
+/**
* amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
*
* @adev: amdgpu_device pointer
@@ -155,108 +170,6 @@ static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
}
/**
- * amdgpu_vm_level_shift - return the addr shift for each level
- *
- * @adev: amdgpu_device pointer
- * @level: VMPT level
- *
- * Returns:
- * The number of bits the pfn needs to be right shifted for a level.
- */
-static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
- unsigned level)
-{
- switch (level) {
- case AMDGPU_VM_PDB2:
- case AMDGPU_VM_PDB1:
- case AMDGPU_VM_PDB0:
- return 9 * (AMDGPU_VM_PDB0 - level) +
- adev->vm_manager.block_size;
- case AMDGPU_VM_PTB:
- return 0;
- default:
- return ~0;
- }
-}
-
-/**
- * amdgpu_vm_num_entries - return the number of entries in a PD/PT
- *
- * @adev: amdgpu_device pointer
- * @level: VMPT level
- *
- * Returns:
- * The number of entries in a page directory or page table.
- */
-static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
- unsigned level)
-{
- unsigned shift = amdgpu_vm_level_shift(adev,
- adev->vm_manager.root_level);
-
- if (level == adev->vm_manager.root_level)
- /* For the root directory */
- return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
- >> shift;
- else if (level != AMDGPU_VM_PTB)
- /* Everything in between */
- return 512;
- else
- /* For the page tables on the leaves */
- return AMDGPU_VM_PTE_COUNT(adev);
-}
-
-/**
- * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD
- *
- * @adev: amdgpu_device pointer
- *
- * Returns:
- * The number of entries in the root page directory which needs the ATS setting.
- */
-static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev)
-{
- unsigned shift;
-
- shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
- return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
-}
-
-/**
- * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
- *
- * @adev: amdgpu_device pointer
- * @level: VMPT level
- *
- * Returns:
- * The mask to extract the entry number of a PD/PT from an address.
- */
-static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
- unsigned int level)
-{
- if (level <= adev->vm_manager.root_level)
- return 0xffffffff;
- else if (level != AMDGPU_VM_PTB)
- return 0x1ff;
- else
- return AMDGPU_VM_PTE_COUNT(adev) - 1;
-}
-
-/**
- * amdgpu_vm_bo_size - returns the size of the BOs in bytes
- *
- * @adev: amdgpu_device pointer
- * @level: VMPT level
- *
- * Returns:
- * The size of the BO for a page directory or page table in bytes.
- */
-static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
-{
- return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
-}
-
-/**
* amdgpu_vm_bo_evicted - vm_bo is evicted
*
* @vm_bo: vm_bo which is evicted
@@ -358,9 +271,8 @@ static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
* Initialize a bo_va_base structure and add it to the appropriate lists
*
*/
-static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
- struct amdgpu_vm *vm,
- struct amdgpu_bo *bo)
+void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+ struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
base->vm = vm;
base->bo = bo;
@@ -396,228 +308,6 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
}
/**
- * amdgpu_vm_pt_parent - get the parent page directory
- *
- * @pt: child page table
- *
- * Helper to get the parent entry for the child page table. NULL if we are at
- * the root page directory.
- */
-static struct amdgpu_vm_bo_base *amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
-{
- struct amdgpu_bo *parent = pt->bo->parent;
-
- if (!parent)
- return NULL;
-
- return parent->vm_bo;
-}
-
-/*
- * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
- */
-struct amdgpu_vm_pt_cursor {
- uint64_t pfn;
- struct amdgpu_vm_bo_base *parent;
- struct amdgpu_vm_bo_base *entry;
- unsigned level;
-};
-
-/**
- * amdgpu_vm_pt_start - start PD/PT walk
- *
- * @adev: amdgpu_device pointer
- * @vm: amdgpu_vm structure
- * @start: start address of the walk
- * @cursor: state to initialize
- *
- * Initialize a amdgpu_vm_pt_cursor to start a walk.
- */
-static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, uint64_t start,
- struct amdgpu_vm_pt_cursor *cursor)
-{
- cursor->pfn = start;
- cursor->parent = NULL;
- cursor->entry = &vm->root;
- cursor->level = adev->vm_manager.root_level;
-}
-
-/**
- * amdgpu_vm_pt_descendant - go to child node
- *
- * @adev: amdgpu_device pointer
- * @cursor: current state
- *
- * Walk to the child node of the current node.
- * Returns:
- * True if the walk was possible, false otherwise.
- */
-static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
- struct amdgpu_vm_pt_cursor *cursor)
-{
- unsigned mask, shift, idx;
-
- if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
- !cursor->entry->bo)
- return false;
-
- mask = amdgpu_vm_entries_mask(adev, cursor->level);
- shift = amdgpu_vm_level_shift(adev, cursor->level);
-
- ++cursor->level;
- idx = (cursor->pfn >> shift) & mask;
- cursor->parent = cursor->entry;
- cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
- return true;
-}
-
-/**
- * amdgpu_vm_pt_sibling - go to sibling node
- *
- * @adev: amdgpu_device pointer
- * @cursor: current state
- *
- * Walk to the sibling node of the current node.
- * Returns:
- * True if the walk was possible, false otherwise.
- */
-static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
- struct amdgpu_vm_pt_cursor *cursor)
-{
- unsigned shift, num_entries;
-
- /* Root doesn't have a sibling */
- if (!cursor->parent)
- return false;
-
- /* Go to our parents and see if we got a sibling */
- shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
- num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);
-
- if (cursor->entry == &to_amdgpu_bo_vm(cursor->parent->bo)->entries[num_entries - 1])
- return false;
-
- cursor->pfn += 1ULL << shift;
- cursor->pfn &= ~((1ULL << shift) - 1);
- ++cursor->entry;
- return true;
-}
-
-/**
- * amdgpu_vm_pt_ancestor - go to parent node
- *
- * @cursor: current state
- *
- * Walk to the parent node of the current node.
- * Returns:
- * True if the walk was possible, false otherwise.
- */
-static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
-{
- if (!cursor->parent)
- return false;
-
- --cursor->level;
- cursor->entry = cursor->parent;
- cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
- return true;
-}
-
-/**
- * amdgpu_vm_pt_next - get next PD/PT in hieratchy
- *
- * @adev: amdgpu_device pointer
- * @cursor: current state
- *
- * Walk the PD/PT tree to the next node.
- */
-static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
- struct amdgpu_vm_pt_cursor *cursor)
-{
- /* First try a newborn child */
- if (amdgpu_vm_pt_descendant(adev, cursor))
- return;
-
- /* If that didn't worked try to find a sibling */
- while (!amdgpu_vm_pt_sibling(adev, cursor)) {
- /* No sibling, go to our parents and grandparents */
- if (!amdgpu_vm_pt_ancestor(cursor)) {
- cursor->pfn = ~0ll;
- return;
- }
- }
-}
-
-/**
- * amdgpu_vm_pt_first_dfs - start a deep first search
- *
- * @adev: amdgpu_device structure
- * @vm: amdgpu_vm structure
- * @start: optional cursor to start with
- * @cursor: state to initialize
- *
- * Starts a deep first traversal of the PD/PT tree.
- */
-static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *start,
- struct amdgpu_vm_pt_cursor *cursor)
-{
- if (start)
- *cursor = *start;
- else
- amdgpu_vm_pt_start(adev, vm, 0, cursor);
- while (amdgpu_vm_pt_descendant(adev, cursor));
-}
-
-/**
- * amdgpu_vm_pt_continue_dfs - check if the deep first search should continue
- *
- * @start: starting point for the search
- * @entry: current entry
- *
- * Returns:
- * True when the search should continue, false otherwise.
- */
-static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
- struct amdgpu_vm_bo_base *entry)
-{
- return entry && (!start || entry != start->entry);
-}
-
-/**
- * amdgpu_vm_pt_next_dfs - get the next node for a deep first search
- *
- * @adev: amdgpu_device structure
- * @cursor: current state
- *
- * Move the cursor to the next node in a deep first search.
- */
-static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
- struct amdgpu_vm_pt_cursor *cursor)
-{
- if (!cursor->entry)
- return;
-
- if (!cursor->parent)
- cursor->entry = NULL;
- else if (amdgpu_vm_pt_sibling(adev, cursor))
- while (amdgpu_vm_pt_descendant(adev, cursor));
- else
- amdgpu_vm_pt_ancestor(cursor);
-}
-
-/*
- * for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs
- */
-#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
- for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \
- (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
- amdgpu_vm_pt_continue_dfs((start), (entry)); \
- (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
-
-/**
* amdgpu_vm_get_pd_bo - add the VM PD to a validation list
*
* @vm: vm providing the BOs
@@ -726,316 +416,6 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
}
/**
- * amdgpu_vm_clear_bo - initially clear the PDs/PTs
- *
- * @adev: amdgpu_device pointer
- * @vm: VM to clear BO from
- * @vmbo: BO to clear
- * @immediate: use an immediate update
- *
- * Root PD needs to be reserved when calling this.
- *
- * Returns:
- * 0 on success, errno otherwise.
- */
-static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_bo_vm *vmbo,
- bool immediate)
-{
- struct ttm_operation_ctx ctx = { true, false };
- unsigned level = adev->vm_manager.root_level;
- struct amdgpu_vm_update_params params;
- struct amdgpu_bo *ancestor = &vmbo->bo;
- struct amdgpu_bo *bo = &vmbo->bo;
- unsigned entries, ats_entries;
- uint64_t addr;
- int r, idx;
-
- /* Figure out our place in the hierarchy */
- if (ancestor->parent) {
- ++level;
- while (ancestor->parent->parent) {
- ++level;
- ancestor = ancestor->parent;
- }
- }
-
- entries = amdgpu_bo_size(bo) / 8;
- if (!vm->pte_support_ats) {
- ats_entries = 0;
-
- } else if (!bo->parent) {
- ats_entries = amdgpu_vm_num_ats_entries(adev);
- ats_entries = min(ats_entries, entries);
- entries -= ats_entries;
-
- } else {
- struct amdgpu_vm_bo_base *pt;
-
- pt = ancestor->vm_bo;
- ats_entries = amdgpu_vm_num_ats_entries(adev);
- if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >= ats_entries) {
- ats_entries = 0;
- } else {
- ats_entries = entries;
- entries = 0;
- }
- }
-
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (r)
- return r;
-
- if (vmbo->shadow) {
- struct amdgpu_bo *shadow = vmbo->shadow;
-
- r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx);
- if (r)
- return r;
- }
-
- if (!drm_dev_enter(adev_to_drm(adev), &idx))
- return -ENODEV;
-
- r = vm->update_funcs->map_table(vmbo);
- if (r)
- goto exit;
-
- memset(&params, 0, sizeof(params));
- params.adev = adev;
- params.vm = vm;
- params.immediate = immediate;
-
- r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
- if (r)
- goto exit;
-
- addr = 0;
- if (ats_entries) {
- uint64_t value = 0, flags;
-
- flags = AMDGPU_PTE_DEFAULT_ATC;
- if (level != AMDGPU_VM_PTB) {
- /* Handle leaf PDEs as PTEs */
- flags |= AMDGPU_PDE_PTE;
- amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
- }
-
- r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries,
- value, flags);
- if (r)
- goto exit;
-
- addr += ats_entries * 8;
- }
-
- if (entries) {
- uint64_t value = 0, flags = 0;
-
- if (adev->asic_type >= CHIP_VEGA10) {
- if (level != AMDGPU_VM_PTB) {
- /* Handle leaf PDEs as PTEs */
- flags |= AMDGPU_PDE_PTE;
- amdgpu_gmc_get_vm_pde(adev, level,
- &value, &flags);
- } else {
- /* Workaround for fault priority problem on GMC9 */
- flags = AMDGPU_PTE_EXECUTABLE;
- }
- }
-
- r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
- value, flags);
- if (r)
- goto exit;
- }
-
- r = vm->update_funcs->commit(&params, NULL);
-exit:
- drm_dev_exit(idx);
- return r;
-}
-
-/**
- * amdgpu_vm_pt_create - create bo for PD/PT
- *
- * @adev: amdgpu_device pointer
- * @vm: requesting vm
- * @level: the page table level
- * @immediate: use a immediate update
- * @vmbo: pointer to the buffer object pointer
- */
-static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- int level, bool immediate,
- struct amdgpu_bo_vm **vmbo)
-{
- struct amdgpu_bo_param bp;
- struct amdgpu_bo *bo;
- struct dma_resv *resv;
- unsigned int num_entries;
- int r;
-
- memset(&bp, 0, sizeof(bp));
-
- bp.size = amdgpu_vm_bo_size(adev, level);
- bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
- bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
- bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC;
-
- if (level < AMDGPU_VM_PTB)
- num_entries = amdgpu_vm_num_entries(adev, level);
- else
- num_entries = 0;
-
- bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);
-
- if (vm->use_cpu_for_update)
- bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-
- bp.type = ttm_bo_type_kernel;
- bp.no_wait_gpu = immediate;
- if (vm->root.bo)
- bp.resv = vm->root.bo->tbo.base.resv;
-
- r = amdgpu_bo_create_vm(adev, &bp, vmbo);
- if (r)
- return r;
-
- bo = &(*vmbo)->bo;
- if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) {
- (*vmbo)->shadow = NULL;
- return 0;
- }
-
- if (!bp.resv)
- WARN_ON(dma_resv_lock(bo->tbo.base.resv,
- NULL));
- resv = bp.resv;
- memset(&bp, 0, sizeof(bp));
- bp.size = amdgpu_vm_bo_size(adev, level);
- bp.domain = AMDGPU_GEM_DOMAIN_GTT;
- bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
- bp.type = ttm_bo_type_kernel;
- bp.resv = bo->tbo.base.resv;
- bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-
- r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
-
- if (!resv)
- dma_resv_unlock(bo->tbo.base.resv);
-
- if (r) {
- amdgpu_bo_unref(&bo);
- return r;
- }
-
- (*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
- amdgpu_bo_add_to_shadow_list(*vmbo);
-
- return 0;
-}
-
-/**
- * amdgpu_vm_alloc_pts - Allocate a specific page table
- *
- * @adev: amdgpu_device pointer
- * @vm: VM to allocate page tables for
- * @cursor: Which page table to allocate
- * @immediate: use an immediate update
- *
- * Make sure a specific page table or directory is allocated.
- *
- * Returns:
- * 1 if page table needed to be allocated, 0 if page table was already
- * allocated, negative errno if an error occurred.
- */
-static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *cursor,
- bool immediate)
-{
- struct amdgpu_vm_bo_base *entry = cursor->entry;
- struct amdgpu_bo *pt_bo;
- struct amdgpu_bo_vm *pt;
- int r;
-
- if (entry->bo)
- return 0;
-
- r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
- if (r)
- return r;
-
- /* Keep a reference to the root directory to avoid
- * freeing them up in the wrong order.
- */
- pt_bo = &pt->bo;
- pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
- amdgpu_vm_bo_base_init(entry, vm, pt_bo);
- r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
- if (r)
- goto error_free_pt;
-
- return 0;
-
-error_free_pt:
- amdgpu_bo_unref(&pt->shadow);
- amdgpu_bo_unref(&pt_bo);
- return r;
-}
-
-/**
- * amdgpu_vm_free_table - fre one PD/PT
- *
- * @entry: PDE to free
- */
-static void amdgpu_vm_free_table(struct amdgpu_vm_bo_base *entry)
-{
- struct amdgpu_bo *shadow;
-
- if (!entry->bo)
- return;
-
- shadow = amdgpu_bo_shadowed(entry->bo);
- if (shadow) {
- ttm_bo_set_bulk_move(&shadow->tbo, NULL);
- amdgpu_bo_unref(&shadow);
- }
-
- ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
- entry->bo->vm_bo = NULL;
- list_del(&entry->vm_status);
- amdgpu_bo_unref(&entry->bo);
-}
-
-/**
- * amdgpu_vm_free_pts - free PD/PT levels
- *
- * @adev: amdgpu device structure
- * @vm: amdgpu vm structure
- * @start: optional cursor where to start freeing PDs/PTs
- *
- * Free the page directory or page table level and all sub levels.
- */
-static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *start)
-{
- struct amdgpu_vm_pt_cursor cursor;
- struct amdgpu_vm_bo_base *entry;
-
- for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
- amdgpu_vm_free_table(entry);
-
- if (start)
- amdgpu_vm_free_table(start->entry);
-}
-
-/**
* amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
*
* @adev: amdgpu_device pointer
@@ -1282,53 +662,6 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
}
/**
- * amdgpu_vm_update_pde - update a single level in the hierarchy
- *
- * @params: parameters for the update
- * @vm: requested vm
- * @entry: entry to update
- *
- * Makes sure the requested entry in parent is up to date.
- */
-static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_bo_base *entry)
-{
- struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
- struct amdgpu_bo *bo = parent->bo, *pbo;
- uint64_t pde, pt, flags;
- unsigned level;
-
- for (level = 0, pbo = bo->parent; pbo; ++level)
- pbo = pbo->parent;
-
- level += params->adev->vm_manager.root_level;
- amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
- pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
- return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
- 1, 0, flags);
-}
-
-/**
- * amdgpu_vm_invalidate_pds - mark all PDs as invalid
- *
- * @adev: amdgpu_device pointer
- * @vm: related vm
- *
- * Mark all PD level as invalid after an error.
- */
-static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
-{
- struct amdgpu_vm_pt_cursor cursor;
- struct amdgpu_vm_bo_base *entry;
-
- for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
- if (entry->bo && !entry->moved)
- amdgpu_vm_bo_relocated(entry);
-}
-
-/**
* amdgpu_vm_update_pdes - make sure that all directories are valid
*
* @adev: amdgpu_device pointer
@@ -1344,6 +677,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
struct amdgpu_vm *vm, bool immediate)
{
struct amdgpu_vm_update_params params;
+ struct amdgpu_vm_bo_base *entry;
int r, idx;
if (list_empty(&vm->relocated))
@@ -1359,17 +693,10 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
- goto exit;
-
- while (!list_empty(&vm->relocated)) {
- struct amdgpu_vm_bo_base *entry;
-
- entry = list_first_entry(&vm->relocated,
- struct amdgpu_vm_bo_base,
- vm_status);
- amdgpu_vm_bo_idle(entry);
+ goto error;
- r = amdgpu_vm_update_pde(&params, vm, entry);
+ list_for_each_entry(entry, &vm->relocated, vm_status) {
+ r = amdgpu_vm_pde_update(&params, entry);
if (r)
goto error;
}
@@ -1377,297 +704,68 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
r = vm->update_funcs->commit(&params, &vm->last_update);
if (r)
goto error;
- drm_dev_exit(idx);
- return 0;
+
+ while (!list_empty(&vm->relocated)) {
+ entry = list_first_entry(&vm->relocated,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ amdgpu_vm_bo_idle(entry);
+ }
error:
- amdgpu_vm_invalidate_pds(adev, vm);
-exit:
drm_dev_exit(idx);
return r;
}
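
The reordering in this hunk is the substantive change: entries are now walked with list_for_each_entry() and updated in place, and they are only taken off the relocated list once the commit has succeeded. On failure everything stays queued for a later retry, which is what makes the removed amdgpu_vm_invalidate_pds() repair pass unnecessary. A generic sketch of that commit-then-dequeue shape (hypothetical item and queue types):

#include <stddef.h>

struct item { int state; };

static int update(struct item *it) { it->state = 1; return 0; }
static int commit(void) { return 0; }	/* may fail, e.g. -ENOMEM */

/* Update the whole batch first and dequeue only after a successful
 * commit; on any error the items remain queued and can be retried. */
static int flush(struct item *queue, size_t n, size_t *queued)
{
	size_t i;
	int r;

	for (i = 0; i < n; i++) {
		r = update(&queue[i]);
		if (r)
			return r;
	}

	r = commit();
	if (r)
		return r;

	for (i = 0; i < n; i++)
		queue[i].state = 2;	/* analogue of amdgpu_vm_bo_idle() */
	*queued = 0;
	return 0;
}
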
-/*
- * amdgpu_vm_update_flags - figure out flags for PTE updates
- *
- * Make sure to set the right flags for the PTEs at the desired level.
- */
-static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
- struct amdgpu_bo_vm *pt, unsigned int level,
- uint64_t pe, uint64_t addr,
- unsigned int count, uint32_t incr,
- uint64_t flags)
-
-{
- if (level != AMDGPU_VM_PTB) {
- flags |= AMDGPU_PDE_PTE;
- amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);
-
- } else if (params->adev->asic_type >= CHIP_VEGA10 &&
- !(flags & AMDGPU_PTE_VALID) &&
- !(flags & AMDGPU_PTE_PRT)) {
-
- /* Workaround for fault priority problem on GMC9 */
- flags |= AMDGPU_PTE_EXECUTABLE;
- }
-
- params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
- flags);
-}
-
/**
- * amdgpu_vm_fragment - get fragment for PTEs
+ * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
+ * @fence: unused
+ * @cb: the callback structure
*
- * @params: see amdgpu_vm_update_params definition
- * @start: first PTE to handle
- * @end: last PTE to handle
- * @flags: hw mapping flags
- * @frag: resulting fragment size
- * @frag_end: end of this fragment
- *
- * Returns the first possible fragment for the start and end address.
- */
-static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
- uint64_t start, uint64_t end, uint64_t flags,
- unsigned int *frag, uint64_t *frag_end)
-{
- /**
- * The MC L1 TLB supports variable sized pages, based on a fragment
- * field in the PTE. When this field is set to a non-zero value, page
- * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
- * flags are considered valid for all PTEs within the fragment range
- * and corresponding mappings are assumed to be physically contiguous.
- *
- * The L1 TLB can store a single PTE for the whole fragment,
- * significantly increasing the space available for translation
- * caching. This leads to large improvements in throughput when the
- * TLB is under pressure.
- *
- * The L2 TLB distributes small and large fragments into two
- * asymmetric partitions. The large fragment cache is significantly
- * larger. Thus, we try to use large fragments wherever possible.
- * Userspace can support this by aligning virtual base address and
- * allocation size to the fragment size.
- *
- * Starting with Vega10 the fragment size only controls the L1. The L2
- * is now directly feed with small/huge/giant pages from the walker.
- */
- unsigned max_frag;
-
- if (params->adev->asic_type < CHIP_VEGA10)
- max_frag = params->adev->vm_manager.fragment_size;
- else
- max_frag = 31;
-
- /* system pages are non continuously */
- if (params->pages_addr) {
- *frag = 0;
- *frag_end = end;
- return;
- }
-
- /* This intentionally wraps around if no bit is set */
- *frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
- if (*frag >= max_frag) {
- *frag = max_frag;
- *frag_end = end & ~((1ULL << max_frag) - 1);
- } else {
- *frag_end = start + (1 << *frag);
- }
-}
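
A worked example may make the removed helper's math concrete: a fragment must be both (1 << frag)-aligned and at least (1 << frag) pages long, so the code takes the smaller of the alignment of start and the size of the range. A compilable sketch of just that computation (ignoring the pages_addr and max_frag special cases, and the deliberate wrap-around for start == 0):

#include <stdio.h>

/* min(ffs(start) - 1, fls64(end - start) - 1) from the removed
 * amdgpu_vm_fragment(), written with GCC/Clang builtins. */
static unsigned int first_frag(unsigned long long start,
			       unsigned long long end)
{
	unsigned int align = __builtin_ffsll(start) - 1;	/* lowest set bit */
	unsigned int size = 63 - __builtin_clzll(end - start);	/* highest set bit */

	return align < size ? align : size;
}

int main(void)
{
	/* Page 0x400 is 2^10-aligned and the range spans 2^10 pages, so
	 * frag = 10: one PTE then covers 2^(12 + 10) bytes, i.e. 4 MiB. */
	printf("frag = %u\n", first_frag(0x400, 0x800));
	return 0;
}
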
-
-/**
- * amdgpu_vm_update_ptes - make sure that page tables are valid
- *
- * @params: see amdgpu_vm_update_params definition
- * @start: start of GPU address range
- * @end: end of GPU address range
- * @dst: destination address to map to, the next dst inside the function
- * @flags: mapping flags
- *
- * Update the page tables in the range @start - @end.
- *
- * Returns:
- * 0 for success, -EINVAL for failure.
+ * Increments the tlb sequence to make sure that future CS execute a VM flush.
*/
-static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
- uint64_t start, uint64_t end,
- uint64_t dst, uint64_t flags)
+static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
{
- struct amdgpu_device *adev = params->adev;
- struct amdgpu_vm_pt_cursor cursor;
- uint64_t frag_start = start, frag_end;
- unsigned int frag;
- int r;
-
- /* figure out the initial fragment */
- amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);
-
- /* walk over the address space and update the PTs */
- amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
- while (cursor.pfn < end) {
- unsigned shift, parent_shift, mask;
- uint64_t incr, entry_end, pe_start;
- struct amdgpu_bo *pt;
-
- if (!params->unlocked) {
- /* make sure that the page tables covering the
- * address range are actually allocated
- */
- r = amdgpu_vm_alloc_pts(params->adev, params->vm,
- &cursor, params->immediate);
- if (r)
- return r;
- }
-
- shift = amdgpu_vm_level_shift(adev, cursor.level);
- parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
- if (params->unlocked) {
- /* Unlocked updates are only allowed on the leaves */
- if (amdgpu_vm_pt_descendant(adev, &cursor))
- continue;
- } else if (adev->asic_type < CHIP_VEGA10 &&
- (flags & AMDGPU_PTE_VALID)) {
- /* No huge page support before GMC v9 */
- if (cursor.level != AMDGPU_VM_PTB) {
- if (!amdgpu_vm_pt_descendant(adev, &cursor))
- return -ENOENT;
- continue;
- }
- } else if (frag < shift) {
- /* We can't use this level when the fragment size is
- * smaller than the address shift. Go to the next
- * child entry and try again.
- */
- if (amdgpu_vm_pt_descendant(adev, &cursor))
- continue;
- } else if (frag >= parent_shift) {
- /* If the fragment size is even larger than the parent
- * shift we should go up one level and check it again.
- */
- if (!amdgpu_vm_pt_ancestor(&cursor))
- return -EINVAL;
- continue;
- }
+ struct amdgpu_vm_tlb_seq_cb *tlb_cb;
- pt = cursor.entry->bo;
- if (!pt) {
- /* We need all PDs and PTs for mapping something, */
- if (flags & AMDGPU_PTE_VALID)
- return -ENOENT;
-
- /* but unmapping something can happen at a higher
- * level.
- */
- if (!amdgpu_vm_pt_ancestor(&cursor))
- return -EINVAL;
-
- pt = cursor.entry->bo;
- shift = parent_shift;
- frag_end = max(frag_end, ALIGN(frag_start + 1,
- 1ULL << shift));
- }
-
- /* Looks good so far, calculate parameters for the update */
- incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
- mask = amdgpu_vm_entries_mask(adev, cursor.level);
- pe_start = ((cursor.pfn >> shift) & mask) * 8;
- entry_end = ((uint64_t)mask + 1) << shift;
- entry_end += cursor.pfn & ~(entry_end - 1);
- entry_end = min(entry_end, end);
-
- do {
- struct amdgpu_vm *vm = params->vm;
- uint64_t upd_end = min(entry_end, frag_end);
- unsigned nptes = (upd_end - frag_start) >> shift;
- uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);
-
- /* This can happen when we set higher level PDs to
- * silent to stop fault floods.
- */
- nptes = max(nptes, 1u);
-
- trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
- min(nptes, 32u), dst, incr, upd_flags,
- vm->task_info.pid,
- vm->immediate.fence_context);
- amdgpu_vm_update_flags(params, to_amdgpu_bo_vm(pt),
- cursor.level, pe_start, dst,
- nptes, incr, upd_flags);
-
- pe_start += nptes * 8;
- dst += nptes * incr;
-
- frag_start = upd_end;
- if (frag_start >= frag_end) {
- /* figure out the next fragment */
- amdgpu_vm_fragment(params, frag_start, end,
- flags, &frag, &frag_end);
- if (frag < shift)
- break;
- }
- } while (frag_start < entry_end);
-
- if (amdgpu_vm_pt_descendant(adev, &cursor)) {
- /* Free all child entries.
- * Update the tables with the flags and addresses and free up subsequent
- * tables in the case of huge pages or freed up areas.
- * This is the maximum you can free, because all other page tables are not
- * completely covered by the range and so potentially still in use.
- */
- while (cursor.pfn < frag_start) {
- /* Make sure previous mapping is freed */
- if (cursor.entry->bo) {
- params->table_freed = true;
- amdgpu_vm_free_pts(adev, params->vm, &cursor);
- }
- amdgpu_vm_pt_next(adev, &cursor);
- }
-
- } else if (frag >= shift) {
- /* or just move on to the next on the same level. */
- amdgpu_vm_pt_next(adev, &cursor);
- }
- }
-
- return 0;
+ tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
+ atomic64_inc(&tlb_cb->vm->tlb_seq);
+ kfree(tlb_cb);
}
/**
- * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
+ * amdgpu_vm_update_range - update a range in the vm page table
*
- * @adev: amdgpu_device pointer of the VM
- * @bo_adev: amdgpu_device pointer of the mapped BO
- * @vm: requested vm
+ * @adev: amdgpu_device pointer to use for commands
+ * @vm: the VM to update the range
* @immediate: immediate submission in a page fault
* @unlocked: unlocked invalidation during MM callback
+ * @flush_tlb: trigger tlb invalidation after update completed
* @resv: fences we need to sync to
* @start: start of mapped range
* @last: last mapped entry
* @flags: flags for the entries
* @offset: offset into nodes and pages_addr
+ * @vram_base: base for vram mappings
* @res: ttm_resource to map
* @pages_addr: DMA addresses to use for mapping
* @fence: optional resulting fence
- * @table_freed: return true if page table is freed
*
* Fill in the page table entries between @start and @last.
*
* Returns:
- * 0 for success, -EINVAL for failure.
+ * 0 for success, negative error code for failure.
*/
-int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
- struct amdgpu_device *bo_adev,
- struct amdgpu_vm *vm, bool immediate,
- bool unlocked, struct dma_resv *resv,
- uint64_t start, uint64_t last,
- uint64_t flags, uint64_t offset,
- struct ttm_resource *res,
- dma_addr_t *pages_addr,
- struct dma_fence **fence,
- bool *table_freed)
+int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ bool immediate, bool unlocked, bool flush_tlb,
+ struct dma_resv *resv, uint64_t start, uint64_t last,
+ uint64_t flags, uint64_t offset, uint64_t vram_base,
+ struct ttm_resource *res, dma_addr_t *pages_addr,
+ struct dma_fence **fence)
{
struct amdgpu_vm_update_params params;
+ struct amdgpu_vm_tlb_seq_cb *tlb_cb;
struct amdgpu_res_cursor cursor;
enum amdgpu_sync_mode sync_mode;
int r, idx;
@@ -1675,6 +773,18 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (!drm_dev_enter(adev_to_drm(adev), &idx))
return -ENODEV;
+ tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
+ if (!tlb_cb) {
+ r = -ENOMEM;
+ goto error_unlock;
+ }
+
+ /* On Vega20 with XGMI, PTEs can get inadvertently cached in the L2
+ * texture cache, so always do a heavy-weight TLB flush.
+ */
+ flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0);
+
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
@@ -1693,7 +803,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
amdgpu_vm_eviction_lock(vm);
if (vm->evicting) {
r = -EBUSY;
- goto error_unlock;
+ goto error_free;
}
if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
@@ -1706,7 +816,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
r = vm->update_funcs->prepare(&params, resv, sync_mode);
if (r)
- goto error_unlock;
+ goto error_free;
amdgpu_res_first(pages_addr ? NULL : res, offset,
(last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
@@ -1746,16 +856,15 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
}
} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
- addr = bo_adev->vm_manager.vram_base_offset +
- cursor.start;
+ addr = vram_base + cursor.start;
} else {
addr = 0;
}
tmp = start + num_entries;
- r = amdgpu_vm_update_ptes(&params, start, tmp, addr, flags);
+ r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
if (r)
- goto error_unlock;
+ goto error_free;
amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
start = tmp;
@@ -1763,8 +872,21 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
r = vm->update_funcs->commit(&params, fence);
- if (table_freed)
- *table_freed = *table_freed || params.table_freed;
+ if (flush_tlb || params.table_freed) {
+ tlb_cb->vm = vm;
+ if (fence && *fence &&
+ !dma_fence_add_callback(*fence, &tlb_cb->cb,
+ amdgpu_vm_tlb_seq_cb)) {
+ dma_fence_put(vm->last_tlb_flush);
+ vm->last_tlb_flush = dma_fence_get(*fence);
+ } else {
+ amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
+ }
+ tlb_cb = NULL;
+ }
+
+error_free:
+ kfree(tlb_cb);
error_unlock:
amdgpu_vm_eviction_unlock(vm);
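
The tlb_cb handling above leans on the dma_fence_add_callback() contract: it returns 0 and takes ownership of the callback while the fence is still pending, or -ENOENT without ever invoking the function once the fence has signaled, which is why the code falls back to calling amdgpu_vm_tlb_seq_cb() by hand. A small userspace model of that attach-or-run-inline shape (simplified types; the real API is also safe against concurrent signaling):

#include <stdbool.h>
#include <stdio.h>

struct cb { void (*func)(struct cb *cb); };

struct fence { bool signaled; struct cb *cb; };

/* Models dma_fence_add_callback(): attach while pending, otherwise
 * return -ENOENT and never call the function. */
static int fence_add_callback(struct fence *f, struct cb *cb,
			      void (*func)(struct cb *cb))
{
	if (f->signaled)
		return -2;	/* -ENOENT */

	cb->func = func;
	f->cb = cb;
	return 0;
}

static void bump_seq(struct cb *cb)
{
	puts("tlb_seq++");
}

/* Mirrors the hunk above: the sequence must advance either way, so a
 * failed attach means running the callback directly. */
static void attach_or_run(struct fence *f, struct cb *cb)
{
	if (fence_add_callback(f, cb, bump_seq))
		bump_seq(cb);
}

int main(void)
{
	struct fence pending = { .signaled = false };
	struct fence done = { .signaled = true };
	struct cb a, b;

	attach_or_run(&pending, &a);	/* deferred until the fence signals */
	attach_or_run(&done, &b);	/* prints tlb_seq++ immediately */
	return 0;
}
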
@@ -1822,7 +944,6 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
* @adev: amdgpu_device pointer
* @bo_va: requested BO and VM object
* @clear: if true clear the entries
- * @table_freed: return true if page table is freed
*
* Fill in the page table entries for @bo_va.
*
@@ -1830,7 +951,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
* 0 for success, -EINVAL for failure.
*/
int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
- bool clear, bool *table_freed)
+ bool clear)
{
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
@@ -1838,9 +959,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
dma_addr_t *pages_addr = NULL;
struct ttm_resource *mem;
struct dma_fence **last_update;
+ bool flush_tlb = clear;
struct dma_resv *resv;
+ uint64_t vram_base;
uint64_t flags;
- struct amdgpu_device *bo_adev = adev;
int r;
if (clear || !bo) {
@@ -1865,14 +987,18 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
}
if (bo) {
+ struct amdgpu_device *bo_adev;
+
flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
if (amdgpu_bo_encrypted(bo))
flags |= AMDGPU_PTE_TMZ;
bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ vram_base = bo_adev->vm_manager.vram_base_offset;
} else {
flags = 0x0;
+ vram_base = 0;
}
if (clear || (bo && bo->tbo.base.resv ==
@@ -1882,7 +1008,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
last_update = &bo_va->last_pt_update;
if (!clear && bo_va->base.moved) {
- bo_va->base.moved = false;
+ flush_tlb = true;
list_splice_init(&bo_va->valids, &bo_va->invalids);
} else if (bo_va->cleared != clear) {
@@ -1905,11 +1031,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
trace_amdgpu_vm_bo_update(mapping);
- r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
- resv, mapping->start,
- mapping->last, update_flags,
- mapping->offset, mem,
- pages_addr, last_update, table_freed);
+ r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
+ resv, mapping->start, mapping->last,
+ update_flags, mapping->offset,
+ vram_base, mem, pages_addr,
+ last_update);
if (r)
return r;
}
@@ -1932,6 +1058,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
list_splice_init(&bo_va->invalids, &bo_va->valids);
bo_va->cleared = clear;
+ bo_va->base.moved = false;
if (trace_amdgpu_vm_bo_mapping_enabled()) {
list_for_each_entry(mapping, &bo_va->valids, list)
@@ -2100,10 +1227,10 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
- r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false,
- resv, mapping->start,
- mapping->last, init_pte_value,
- 0, NULL, NULL, &f, NULL);
+ r = amdgpu_vm_update_range(adev, vm, false, false, true, resv,
+ mapping->start, mapping->last,
+ init_pte_value, 0, 0, NULL, NULL,
+ &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);
@@ -2145,7 +1272,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
/* Per VM BOs never need to bo cleared in the page tables */
- r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
}
@@ -2164,7 +1291,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
else
clear = true;
- r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL);
+ r = amdgpu_vm_bo_update(adev, bo_va, clear);
if (r)
return r;
@@ -2914,6 +2041,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
vm->last_unlocked = dma_fence_get_stub();
+ vm->last_tlb_flush = dma_fence_get_stub();
mutex_init(&vm->eviction_lock);
vm->evicting = false;
@@ -2933,7 +2061,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
- r = amdgpu_vm_clear_bo(adev, vm, root, false);
+ r = amdgpu_vm_pt_clear(adev, vm, root, false);
if (r)
goto error_unreserve;
@@ -2952,6 +2080,7 @@ error_free_root:
vm->root.bo = NULL;
error_free_delayed:
+ dma_fence_put(vm->last_tlb_flush);
dma_fence_put(vm->last_unlocked);
drm_sched_entity_destroy(&vm->delayed);
@@ -2962,34 +2091,6 @@ error_free_immediate:
}
/**
- * amdgpu_vm_check_clean_reserved - check if a VM is clean
- *
- * @adev: amdgpu_device pointer
- * @vm: the VM to check
- *
- * check all entries of the root PD, if any subsequent PDs are allocated,
- * it means there are page table creating and filling, and is no a clean
- * VM
- *
- * Returns:
- * 0 if this VM is clean
- */
-static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
-{
- enum amdgpu_vm_level root = adev->vm_manager.root_level;
- unsigned int entries = amdgpu_vm_num_entries(adev, root);
- unsigned int i = 0;
-
- for (i = 0; i < entries; i++) {
- if (to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo)
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
* amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
*
* @adev: amdgpu_device pointer
@@ -3018,17 +2119,17 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
return r;
/* Sanity checks */
- r = amdgpu_vm_check_clean_reserved(adev, vm);
- if (r)
+ if (!amdgpu_vm_pt_is_root_clean(adev, vm)) {
+ r = -EINVAL;
goto unreserve_bo;
+ }
/* Check if PD needs to be reinitialized and do it before
* changing any other state, in case it fails.
*/
if (pte_support_ats != vm->pte_support_ats) {
vm->pte_support_ats = pte_support_ats;
- r = amdgpu_vm_clear_bo(adev, vm,
- to_amdgpu_bo_vm(vm->root.bo),
+ r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo),
false);
if (r)
goto unreserve_bo;
@@ -3096,6 +2197,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
struct amdgpu_bo_va_mapping *mapping, *tmp;
bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
struct amdgpu_bo *root;
+ unsigned long flags;
int i;
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
@@ -3105,6 +2207,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_vm_set_pasid(adev, vm, 0);
dma_fence_wait(vm->last_unlocked, false);
dma_fence_put(vm->last_unlocked);
+ dma_fence_wait(vm->last_tlb_flush, false);
+ /* Make sure that all fence callbacks have completed */
+ spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
+ spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
+ dma_fence_put(vm->last_tlb_flush);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
@@ -3116,7 +2223,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
}
- amdgpu_vm_free_pts(adev, vm, NULL);
+ amdgpu_vm_pt_free_root(adev, vm);
amdgpu_bo_unreserve(root);
amdgpu_bo_unref(&root);
WARN_ON(vm->root.bo);
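
The otherwise empty lock/unlock pair added to amdgpu_vm_fini() a hunk above is a completion barrier: fence callbacks run under the fence's spinlock, and dma_fence_wait() can return before the callback list has been fully processed, so briefly taking the same lock guarantees amdgpu_vm_tlb_seq_cb() has finished before the VM is torn down. A pthread model of the same idea:

#include <pthread.h>

/* Once we can take the lock the signaler holds while running
 * callbacks, no callback can still be mid-flight. */
static void drain_callbacks(pthread_mutex_t *fence_lock)
{
	pthread_mutex_lock(fence_lock);
	/* intentionally empty, mirroring the irqsave/irqrestore pair */
	pthread_mutex_unlock(fence_lock);
}
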
@@ -3376,9 +2483,8 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
goto error_unlock;
}
- r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
- addr, flags, value, NULL, NULL, NULL,
- NULL);
+ r = amdgpu_vm_update_range(adev, vm, true, false, false, NULL, addr,
+ addr, flags, value, 0, NULL, NULL, NULL);
if (r)
goto error_unlock;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index bd7892482bbf..9ecb7f663e19 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -284,6 +284,10 @@ struct amdgpu_vm {
struct drm_sched_entity immediate;
struct drm_sched_entity delayed;
+ /* Last finished delayed update */
+ atomic64_t tlb_seq;
+ struct dma_fence *last_tlb_flush;
+
/* Last unlocked submission to the scheduler entities */
struct dma_fence *last_unlocked;
@@ -395,18 +399,17 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
-int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
- struct amdgpu_device *bo_adev,
- struct amdgpu_vm *vm, bool immediate,
- bool unlocked, struct dma_resv *resv,
- uint64_t start, uint64_t last,
- uint64_t flags, uint64_t offset,
- struct ttm_resource *res,
- dma_addr_t *pages_addr,
- struct dma_fence **fence, bool *free_table);
+void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+ struct amdgpu_vm *vm, struct amdgpu_bo *bo);
+int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ bool immediate, bool unlocked, bool flush_tlb,
+ struct dma_resv *resv, uint64_t start, uint64_t last,
+ uint64_t flags, uint64_t offset, uint64_t vram_base,
+ struct ttm_resource *res, dma_addr_t *pages_addr,
+ struct dma_fence **fence);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
- bool clear, bool *table_freed);
+ bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo, bool evicted);
@@ -455,8 +458,34 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
uint64_t *gtt_mem, uint64_t *cpu_mem);
+int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo_vm *vmbo, bool immediate);
+int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int level, bool immediate, struct amdgpu_bo_vm **vmbo);
+void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+
+int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
+ struct amdgpu_vm_bo_base *entry);
+int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint64_t flags);
+
#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#endif
+/**
+ * amdgpu_vm_tlb_seq - return tlb flush sequence number
+ * @vm: the amdgpu_vm structure to query
+ *
+ * Returns the tlb flush sequence number which indicates that the VM TLBs need
+ * to be invalidated whenever the sequence number changes.
+ */
+static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
+{
+ return atomic64_read(&vm->tlb_seq);
+}
+
#endif
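
The sequence number turns TLB maintenance into a cheap comparison for users of this header: sample amdgpu_vm_tlb_seq() when a job's mappings are prepared, compare again at submission time, and flush if it moved. A hypothetical consumer sketch (the job structure and field names are illustrative, not amdgpu API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct vm { atomic_uint_fast64_t tlb_seq; };

static uint64_t vm_tlb_seq(struct vm *vm)
{
	return atomic_load(&vm->tlb_seq);	/* amdgpu_vm_tlb_seq() analogue */
}

struct job {
	struct vm *vm;
	uint64_t seen_seq;
};

static void job_prepare(struct job *job, struct vm *vm)
{
	job->vm = vm;
	job->seen_seq = vm_tlb_seq(vm);	/* sample while mappings are set up */
}

static bool job_needs_flush(const struct job *job)
{
	/* A page-table update completed in between, so the hardware TLBs
	 * may hold stale translations for this VM. */
	return vm_tlb_seq(job->vm) != job->seen_seq;
}
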
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
new file mode 100644
index 000000000000..7761a3ea172e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -0,0 +1,977 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_drv.h>
+
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+#include "amdgpu_vm.h"
+
+/*
+ * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
+ */
+struct amdgpu_vm_pt_cursor {
+ uint64_t pfn;
+ struct amdgpu_vm_bo_base *parent;
+ struct amdgpu_vm_bo_base *entry;
+ unsigned int level;
+};
+
+/**
+ * amdgpu_vm_pt_level_shift - return the addr shift for each level
+ *
+ * @adev: amdgpu_device pointer
+ * @level: VMPT level
+ *
+ * Returns:
+ * The number of bits the pfn needs to be right shifted for a level.
+ */
+static unsigned int amdgpu_vm_pt_level_shift(struct amdgpu_device *adev,
+ unsigned int level)
+{
+ switch (level) {
+ case AMDGPU_VM_PDB2:
+ case AMDGPU_VM_PDB1:
+ case AMDGPU_VM_PDB0:
+ return 9 * (AMDGPU_VM_PDB0 - level) +
+ adev->vm_manager.block_size;
+ case AMDGPU_VM_PTB:
+ return 0;
+ default:
+ return ~0;
+ }
+}
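
A worked example, assuming the typical block_size of 9: the PTB indexes 4 KiB pages directly (shift 0), each PDB0 entry then covers 2^9 pages (2 MiB), each PDB1 entry 1 GiB and each PDB2 entry 512 GiB. A tiny check of the formula with stand-in level constants:

#include <stdio.h>

enum { PDB2 = 0, PDB1, PDB0, PTB };	/* stand-ins for AMDGPU_VM_* levels */

static unsigned int level_shift(unsigned int level, unsigned int block_size)
{
	if (level == PTB)
		return 0;
	return 9 * (PDB0 - level) + block_size;
}

int main(void)
{
	/* With block_size = 9: prints 27 18 9 0. */
	printf("%u %u %u %u\n", level_shift(PDB2, 9), level_shift(PDB1, 9),
	       level_shift(PDB0, 9), level_shift(PTB, 9));
	return 0;
}
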
+
+/**
+ * amdgpu_vm_pt_num_entries - return the number of entries in a PD/PT
+ *
+ * @adev: amdgpu_device pointer
+ * @level: VMPT level
+ *
+ * Returns:
+ * The number of entries in a page directory or page table.
+ */
+static unsigned int amdgpu_vm_pt_num_entries(struct amdgpu_device *adev,
+ unsigned int level)
+{
+ unsigned int shift;
+
+ shift = amdgpu_vm_pt_level_shift(adev, adev->vm_manager.root_level);
+ if (level == adev->vm_manager.root_level)
+ /* For the root directory */
+ return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
+ >> shift;
+ else if (level != AMDGPU_VM_PTB)
+ /* Everything in between */
+ return 512;
+
+ /* For the page tables on the leaves */
+ return AMDGPU_VM_PTE_COUNT(adev);
+}
+
+/**
+ * amdgpu_vm_pt_num_ats_entries - return the number of ATS entries in the root PD
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns:
+ * The number of entries in the root page directory which needs the ATS setting.
+ */
+static unsigned int amdgpu_vm_pt_num_ats_entries(struct amdgpu_device *adev)
+{
+ unsigned int shift;
+
+ shift = amdgpu_vm_pt_level_shift(adev, adev->vm_manager.root_level);
+ return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
+}
+
+/**
+ * amdgpu_vm_pt_entries_mask - the mask to get the entry number of a PD/PT
+ *
+ * @adev: amdgpu_device pointer
+ * @level: VMPT level
+ *
+ * Returns:
+ * The mask to extract the entry number of a PD/PT from an address.
+ */
+static uint32_t amdgpu_vm_pt_entries_mask(struct amdgpu_device *adev,
+ unsigned int level)
+{
+ if (level <= adev->vm_manager.root_level)
+ return 0xffffffff;
+ else if (level != AMDGPU_VM_PTB)
+ return 0x1ff;
+ else
+ return AMDGPU_VM_PTE_COUNT(adev) - 1;
+}
+
+/**
+ * amdgpu_vm_pt_size - returns the size of the page table in bytes
+ *
+ * @adev: amdgpu_device pointer
+ * @level: VMPT level
+ *
+ * Returns:
+ * The size of the BO for a page directory or page table in bytes.
+ */
+static unsigned int amdgpu_vm_pt_size(struct amdgpu_device *adev,
+ unsigned int level)
+{
+ return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_pt_num_entries(adev, level) * 8);
+}
+
+/**
+ * amdgpu_vm_pt_parent - get the parent page directory
+ *
+ * @pt: child page table
+ *
+ * Helper to get the parent entry for the child page table. NULL if we are at
+ * the root page directory.
+ */
+static struct amdgpu_vm_bo_base *
+amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
+{
+ struct amdgpu_bo *parent = pt->bo->parent;
+
+ if (!parent)
+ return NULL;
+
+ return parent->vm_bo;
+}
+
+/**
+ * amdgpu_vm_pt_start - start PD/PT walk
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: amdgpu_vm structure
+ * @start: start address of the walk
+ * @cursor: state to initialize
+ *
+ * Initialize an amdgpu_vm_pt_cursor to start a walk.
+ */
+static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, uint64_t start,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ cursor->pfn = start;
+ cursor->parent = NULL;
+ cursor->entry = &vm->root;
+ cursor->level = adev->vm_manager.root_level;
+}
+
+/**
+ * amdgpu_vm_pt_descendant - go to child node
+ *
+ * @adev: amdgpu_device pointer
+ * @cursor: current state
+ *
+ * Walk to the child node of the current node.
+ * Returns:
+ * True if the walk was possible, false otherwise.
+ */
+static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ unsigned int mask, shift, idx;
+
+ if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
+ !cursor->entry->bo)
+ return false;
+
+ mask = amdgpu_vm_pt_entries_mask(adev, cursor->level);
+ shift = amdgpu_vm_pt_level_shift(adev, cursor->level);
+
+ ++cursor->level;
+ idx = (cursor->pfn >> shift) & mask;
+ cursor->parent = cursor->entry;
+ cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
+ return true;
+}
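
Descending a level is plain shift-and-mask arithmetic on the page frame number; e.g. at a PDB0 with shift 9 and mask 0x1ff, pfn 0x12345 selects entry (0x12345 >> 9) & 0x1ff = 0x91. A one-liner to verify:

#include <stdio.h>

int main(void)
{
	unsigned long long pfn = 0x12345;	/* arbitrary example address */
	unsigned int shift = 9;			/* PDB0 with block_size 9 */
	unsigned int mask = 0x1ff;		/* 512 entries per directory */

	/* Same selection as amdgpu_vm_pt_descendant() performs. */
	printf("idx = 0x%llx\n", (pfn >> shift) & mask);	/* idx = 0x91 */
	return 0;
}
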
+
+/**
+ * amdgpu_vm_pt_sibling - go to sibling node
+ *
+ * @adev: amdgpu_device pointer
+ * @cursor: current state
+ *
+ * Walk to the sibling node of the current node.
+ * Returns:
+ * True if the walk was possible, false otherwise.
+ */
+static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+
+ unsigned int shift, num_entries;
+ struct amdgpu_bo_vm *parent;
+
+ /* Root doesn't have a sibling */
+ if (!cursor->parent)
+ return false;
+
+ /* Go to our parents and see if we got a sibling */
+ shift = amdgpu_vm_pt_level_shift(adev, cursor->level - 1);
+ num_entries = amdgpu_vm_pt_num_entries(adev, cursor->level - 1);
+ parent = to_amdgpu_bo_vm(cursor->parent->bo);
+
+ if (cursor->entry == &parent->entries[num_entries - 1])
+ return false;
+
+ cursor->pfn += 1ULL << shift;
+ cursor->pfn &= ~((1ULL << shift) - 1);
+ ++cursor->entry;
+ return true;
+}
+
+/**
+ * amdgpu_vm_pt_ancestor - go to parent node
+ *
+ * @cursor: current state
+ *
+ * Walk to the parent node of the current node.
+ * Returns:
+ * True if the walk was possible, false otherwise.
+ */
+static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
+{
+ if (!cursor->parent)
+ return false;
+
+ --cursor->level;
+ cursor->entry = cursor->parent;
+ cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
+ return true;
+}
+
+/**
+ * amdgpu_vm_pt_next - get next PD/PT in hierarchy
+ *
+ * @adev: amdgpu_device pointer
+ * @cursor: current state
+ *
+ * Walk the PD/PT tree to the next node.
+ */
+static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ /* First try a newborn child */
+ if (amdgpu_vm_pt_descendant(adev, cursor))
+ return;
+
+ /* If that didn't work, try to find a sibling */
+ while (!amdgpu_vm_pt_sibling(adev, cursor)) {
+ /* No sibling, go to our parents and grandparents */
+ if (!amdgpu_vm_pt_ancestor(cursor)) {
+ cursor->pfn = ~0ll;
+ return;
+ }
+ }
+}
+
+/**
+ * amdgpu_vm_pt_first_dfs - start a depth-first search
+ *
+ * @adev: amdgpu_device structure
+ * @vm: amdgpu_vm structure
+ * @start: optional cursor to start with
+ * @cursor: state to initialize
+ *
+ * Starts a depth-first traversal of the PD/PT tree.
+ */
+static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt_cursor *start,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ if (start)
+ *cursor = *start;
+ else
+ amdgpu_vm_pt_start(adev, vm, 0, cursor);
+
+ while (amdgpu_vm_pt_descendant(adev, cursor))
+ ;
+}
+
+/**
+ * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
+ *
+ * @start: starting point for the search
+ * @entry: current entry
+ *
+ * Returns:
+ * True when the search should continue, false otherwise.
+ */
+static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
+ struct amdgpu_vm_bo_base *entry)
+{
+ return entry && (!start || entry != start->entry);
+}
+
+/**
+ * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
+ *
+ * @adev: amdgpu_device structure
+ * @cursor: current state
+ *
+ * Move the cursor to the next node in a depth-first search.
+ */
+static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ if (!cursor->entry)
+ return;
+
+ if (!cursor->parent)
+ cursor->entry = NULL;
+ else if (amdgpu_vm_pt_sibling(adev, cursor))
+ while (amdgpu_vm_pt_descendant(adev, cursor))
+ ;
+ else
+ amdgpu_vm_pt_ancestor(cursor);
+}
+
+/*
+ * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
+ */
+#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
+ for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \
+ (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
+ amdgpu_vm_pt_continue_dfs((start), (entry)); \
+ (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
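
The macro reads (entry) out before advancing the cursor, which is what makes it safe to free the current node from the loop body. Usage looks like the body of the former amdgpu_vm_free_pts(), roughly (a fragment, not compile-checked against the full headers):

struct amdgpu_vm_pt_cursor cursor;
struct amdgpu_vm_bo_base *entry;

/* Free a subtree; "entry" was captured before the cursor moved on,
 * so amdgpu_vm_pt_free() cannot invalidate the traversal state. */
for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
	amdgpu_vm_pt_free(entry);
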
+
+/**
+ * amdgpu_vm_pt_clear - initially clear the PDs/PTs
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: VM to clear BO from
+ * @vmbo: BO to clear
+ * @immediate: use an immediate update
+ *
+ * Root PD needs to be reserved when calling this.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo_vm *vmbo, bool immediate)
+{
+ unsigned int level = adev->vm_manager.root_level;
+ struct ttm_operation_ctx ctx = { true, false };
+ struct amdgpu_vm_update_params params;
+ struct amdgpu_bo *ancestor = &vmbo->bo;
+ unsigned int entries, ats_entries;
+ struct amdgpu_bo *bo = &vmbo->bo;
+ uint64_t addr;
+ int r, idx;
+
+ /* Figure out our place in the hierarchy */
+ if (ancestor->parent) {
+ ++level;
+ while (ancestor->parent->parent) {
+ ++level;
+ ancestor = ancestor->parent;
+ }
+ }
+
+ entries = amdgpu_bo_size(bo) / 8;
+ if (!vm->pte_support_ats) {
+ ats_entries = 0;
+
+ } else if (!bo->parent) {
+ ats_entries = amdgpu_vm_pt_num_ats_entries(adev);
+ ats_entries = min(ats_entries, entries);
+ entries -= ats_entries;
+
+ } else {
+ struct amdgpu_vm_bo_base *pt;
+
+ pt = ancestor->vm_bo;
+ ats_entries = amdgpu_vm_pt_num_ats_entries(adev);
+ if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >=
+ ats_entries) {
+ ats_entries = 0;
+ } else {
+ ats_entries = entries;
+ entries = 0;
+ }
+ }
+
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ return r;
+
+ if (vmbo->shadow) {
+ struct amdgpu_bo *shadow = vmbo->shadow;
+
+ r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx);
+ if (r)
+ return r;
+ }
+
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return -ENODEV;
+
+ r = vm->update_funcs->map_table(vmbo);
+ if (r)
+ goto exit;
+
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+ params.vm = vm;
+ params.immediate = immediate;
+
+ r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
+ if (r)
+ goto exit;
+
+ addr = 0;
+ if (ats_entries) {
+ uint64_t value = 0, flags;
+
+ flags = AMDGPU_PTE_DEFAULT_ATC;
+ if (level != AMDGPU_VM_PTB) {
+ /* Handle leaf PDEs as PTEs */
+ flags |= AMDGPU_PDE_PTE;
+ amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
+ }
+
+ r = vm->update_funcs->update(&params, vmbo, addr, 0,
+ ats_entries, value, flags);
+ if (r)
+ goto exit;
+
+ addr += ats_entries * 8;
+ }
+
+ if (entries) {
+ uint64_t value = 0, flags = 0;
+
+ if (adev->asic_type >= CHIP_VEGA10) {
+ if (level != AMDGPU_VM_PTB) {
+ /* Handle leaf PDEs as PTEs */
+ flags |= AMDGPU_PDE_PTE;
+ amdgpu_gmc_get_vm_pde(adev, level,
+ &value, &flags);
+ } else {
+ /* Workaround for fault priority problem on GMC9 */
+ flags = AMDGPU_PTE_EXECUTABLE;
+ }
+ }
+
+ r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
+ value, flags);
+ if (r)
+ goto exit;
+ }
+
+ r = vm->update_funcs->commit(&params, NULL);
+exit:
+ drm_dev_exit(idx);
+ return r;
+}
+
+/**
+ * amdgpu_vm_pt_create - create bo for PD/PT
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requesting vm
+ * @level: the page table level
+ * @immediate: use an immediate update
+ * @vmbo: pointer to the buffer object pointer
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int level, bool immediate, struct amdgpu_bo_vm **vmbo)
+{
+ struct amdgpu_bo_param bp;
+ struct amdgpu_bo *bo;
+ struct dma_resv *resv;
+ unsigned int num_entries;
+ int r;
+
+ memset(&bp, 0, sizeof(bp));
+
+ bp.size = amdgpu_vm_pt_size(adev, level);
+ bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
+ bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+
+ if (level < AMDGPU_VM_PTB)
+ num_entries = amdgpu_vm_pt_num_entries(adev, level);
+ else
+ num_entries = 0;
+
+ bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);
+
+ if (vm->use_cpu_for_update)
+ bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
+ bp.type = ttm_bo_type_kernel;
+ bp.no_wait_gpu = immediate;
+ if (vm->root.bo)
+ bp.resv = vm->root.bo->tbo.base.resv;
+
+ r = amdgpu_bo_create_vm(adev, &bp, vmbo);
+ if (r)
+ return r;
+
+ bo = &(*vmbo)->bo;
+ if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) {
+ (*vmbo)->shadow = NULL;
+ return 0;
+ }
+
+ if (!bp.resv)
+ WARN_ON(dma_resv_lock(bo->tbo.base.resv,
+ NULL));
+ resv = bp.resv;
+ memset(&bp, 0, sizeof(bp));
+ bp.size = amdgpu_vm_pt_size(adev, level);
+ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+ bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = bo->tbo.base.resv;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
+
+ if (!resv)
+ dma_resv_unlock(bo->tbo.base.resv);
+
+ if (r) {
+ amdgpu_bo_unref(&bo);
+ return r;
+ }
+
+ (*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
+ amdgpu_bo_add_to_shadow_list(*vmbo);
+
+ return 0;
+}
+
+/**
+ * amdgpu_vm_pt_alloc - Allocate a specific page table
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: VM to allocate page tables for
+ * @cursor: Which page table to allocate
+ * @immediate: use an immediate update
+ *
+ * Make sure a specific page table or directory is allocated.
+ *
+ * Returns:
+ * 0 if the page table was allocated or was already present, negative errno
+ * if an error occurred.
+ */
+static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt_cursor *cursor,
+ bool immediate)
+{
+ struct amdgpu_vm_bo_base *entry = cursor->entry;
+ struct amdgpu_bo *pt_bo;
+ struct amdgpu_bo_vm *pt;
+ int r;
+
+ if (entry->bo)
+ return 0;
+
+ r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
+ if (r)
+ return r;
+
+ /* Keep a reference to the root directory to avoid
+ * freeing page tables in the wrong order.
+ */
+ pt_bo = &pt->bo;
+ pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
+ amdgpu_vm_bo_base_init(entry, vm, pt_bo);
+ r = amdgpu_vm_pt_clear(adev, vm, pt, immediate);
+ if (r)
+ goto error_free_pt;
+
+ return 0;
+
+error_free_pt:
+ amdgpu_bo_unref(&pt->shadow);
+ amdgpu_bo_unref(&pt_bo);
+ return r;
+}
+
+/**
+ * amdgpu_vm_pt_free - free one PD/PT
+ *
+ * @entry: PDE to free
+ */
+static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
+{
+ struct amdgpu_bo *shadow;
+
+ if (!entry->bo)
+ return;
+ shadow = amdgpu_bo_shadowed(entry->bo);
+ entry->bo->vm_bo = NULL;
+ list_del(&entry->vm_status);
+ amdgpu_bo_unref(&shadow);
+ amdgpu_bo_unref(&entry->bo);
+}
+
+/**
+ * amdgpu_vm_pt_free_dfs - free PD/PT levels
+ *
+ * @adev: amdgpu device structure
+ * @vm: amdgpu vm structure
+ * @start: optional cursor where to start freeing PDs/PTs
+ *
+ * Free the page directory or page table level and all sub levels.
+ */
+static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt_cursor *start)
+{
+ struct amdgpu_vm_pt_cursor cursor;
+ struct amdgpu_vm_bo_base *entry;
+
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
+ amdgpu_vm_pt_free(entry);
+
+ if (start)
+ amdgpu_vm_pt_free(start->entry);
+}
+
+/**
+ * amdgpu_vm_pt_free_root - free root PD
+ * @adev: amdgpu device structure
+ * @vm: amdgpu vm structure
+ *
+ * Free the root page directory and everything below it.
+ */
+void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+ amdgpu_vm_pt_free_dfs(adev, vm, NULL);
+}
+
+/**
+ * amdgpu_vm_pt_is_root_clean - check if a root PD is clean
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: the VM to check
+ *
+ * Check all entries of the root PD. If any subsequent PDs are allocated,
+ * page tables have been created and filled, so the VM is not clean.
+ *
+ * Returns:
+ * True if this VM is clean
+ */
+bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+ enum amdgpu_vm_level root = adev->vm_manager.root_level;
+ unsigned int entries = amdgpu_vm_pt_num_entries(adev, root);
+ unsigned int i = 0;
+
+ for (i = 0; i < entries; i++) {
+ if (to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo)
+ return false;
+ }
+ return true;
+}
+
+/**
+ * amdgpu_vm_pde_update - update a single level in the hierarchy
+ *
+ * @params: parameters for the update
+ * @entry: entry to update
+ *
+ * Makes sure the requested entry in parent is up to date.
+ */
+int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
+ struct amdgpu_vm_bo_base *entry)
+{
+ struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
+ struct amdgpu_bo *bo = parent->bo, *pbo;
+ struct amdgpu_vm *vm = params->vm;
+ uint64_t pde, pt, flags;
+ unsigned int level;
+
+ for (level = 0, pbo = bo->parent; pbo; ++level)
+ pbo = pbo->parent;
+
+ level += params->adev->vm_manager.root_level;
+ amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
+ pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
+ return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
+ 1, 0, flags);
+}
+
+/*
+ * amdgpu_vm_pte_update_flags - figure out flags for PTE updates
+ *
+ * Make sure to set the right flags for the PTEs at the desired level.
+ */
+static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
+ struct amdgpu_bo_vm *pt,
+ unsigned int level,
+ uint64_t pe, uint64_t addr,
+ unsigned int count, uint32_t incr,
+ uint64_t flags)
+
+{
+ if (level != AMDGPU_VM_PTB) {
+ flags |= AMDGPU_PDE_PTE;
+ amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);
+
+ } else if (params->adev->asic_type >= CHIP_VEGA10 &&
+ !(flags & AMDGPU_PTE_VALID) &&
+ !(flags & AMDGPU_PTE_PRT)) {
+
+ /* Workaround for fault priority problem on GMC9 */
+ flags |= AMDGPU_PTE_EXECUTABLE;
+ }
+
+ params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
+ flags);
+}
+
+/**
+ * amdgpu_vm_pte_fragment - get fragment for PTEs
+ *
+ * @params: see amdgpu_vm_update_params definition
+ * @start: first PTE to handle
+ * @end: last PTE to handle
+ * @flags: hw mapping flags
+ * @frag: resulting fragment size
+ * @frag_end: end of this fragment
+ *
+ * Returns the first possible fragment for the start and end address.
+ */
+static void amdgpu_vm_pte_fragment(struct amdgpu_vm_update_params *params,
+ uint64_t start, uint64_t end, uint64_t flags,
+ unsigned int *frag, uint64_t *frag_end)
+{
+ /**
+ * The MC L1 TLB supports variable sized pages, based on a fragment
+ * field in the PTE. When this field is set to a non-zero value, page
+ * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
+ * flags are considered valid for all PTEs within the fragment range
+ * and corresponding mappings are assumed to be physically contiguous.
+ *
+ * The L1 TLB can store a single PTE for the whole fragment,
+ * significantly increasing the space available for translation
+ * caching. This leads to large improvements in throughput when the
+ * TLB is under pressure.
+ *
+ * The L2 TLB distributes small and large fragments into two
+ * asymmetric partitions. The large fragment cache is significantly
+ * larger. Thus, we try to use large fragments wherever possible.
+ * Userspace can support this by aligning virtual base address and
+ * allocation size to the fragment size.
+ *
+ * Starting with Vega10 the fragment size only controls the L1. The L2
+ * is now directly fed with small/huge/giant pages from the walker.
+ */
+ unsigned int max_frag;
+
+ if (params->adev->asic_type < CHIP_VEGA10)
+ max_frag = params->adev->vm_manager.fragment_size;
+ else
+ max_frag = 31;
+
+ /* system pages are not physically contiguous */
+ if (params->pages_addr) {
+ *frag = 0;
+ *frag_end = end;
+ return;
+ }
+
+ /* This intentionally wraps around if no bit is set */
+ *frag = min_t(unsigned int, ffs(start) - 1, fls64(end - start) - 1);
+ if (*frag >= max_frag) {
+ *frag = max_frag;
+ *frag_end = end & ~((1ULL << max_frag) - 1);
+ } else {
+ *frag_end = start + (1 << *frag);
+ }
+}
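+
+/*
+ * Worked example with illustrative numbers, assuming no system-page
+ * mapping (params->pages_addr == NULL) and max_frag = 9:
+ * start = 0x200, end = 0x400 in units of 4 KiB pages, i.e. a 2 MiB
+ * aligned, 2 MiB sized range.
+ *
+ *   ffs(0x200) - 1           = 9  // start is 2 MiB aligned
+ *   fls64(0x400 - 0x200) - 1 = 9  // range is 2 MiB long
+ *   frag = min(9, 9) = 9          // one PTE covers 1 << (12 + 9) bytes
+ *
+ * Since frag >= max_frag, frag_end = 0x400 & ~((1ULL << 9) - 1) = 0x400
+ * and the whole range is emitted as a single 2 MiB fragment.
+ */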
+
+/**
+ * amdgpu_vm_ptes_update - make sure that page tables are valid
+ *
+ * @params: see amdgpu_vm_update_params definition
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to; advanced as PTEs are written
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end.
+ *
+ * Returns:
+ * 0 on success, negative errno on failure.
+ */
+int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint64_t flags)
+{
+ struct amdgpu_device *adev = params->adev;
+ struct amdgpu_vm_pt_cursor cursor;
+ uint64_t frag_start = start, frag_end;
+ unsigned int frag;
+ int r;
+
+ /* figure out the initial fragment */
+ amdgpu_vm_pte_fragment(params, frag_start, end, flags, &frag,
+ &frag_end);
+
+ /* walk over the address space and update the PTs */
+ amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
+ while (cursor.pfn < end) {
+ unsigned int shift, parent_shift, mask;
+ uint64_t incr, entry_end, pe_start;
+ struct amdgpu_bo *pt;
+
+ if (!params->unlocked) {
+ /* make sure that the page tables covering the
+ * address range are actually allocated
+ */
+ r = amdgpu_vm_pt_alloc(params->adev, params->vm,
+ &cursor, params->immediate);
+ if (r)
+ return r;
+ }
+
+ shift = amdgpu_vm_pt_level_shift(adev, cursor.level);
+ parent_shift = amdgpu_vm_pt_level_shift(adev, cursor.level - 1);
+ if (params->unlocked) {
+ /* Unlocked updates are only allowed on the leaves */
+ if (amdgpu_vm_pt_descendant(adev, &cursor))
+ continue;
+ } else if (adev->asic_type < CHIP_VEGA10 &&
+ (flags & AMDGPU_PTE_VALID)) {
+ /* No huge page support before GMC v9 */
+ if (cursor.level != AMDGPU_VM_PTB) {
+ if (!amdgpu_vm_pt_descendant(adev, &cursor))
+ return -ENOENT;
+ continue;
+ }
+ } else if (frag < shift) {
+ /* We can't use this level when the fragment size is
+ * smaller than the address shift. Go to the next
+ * child entry and try again.
+ */
+ if (amdgpu_vm_pt_descendant(adev, &cursor))
+ continue;
+ } else if (frag >= parent_shift) {
+ /* If the fragment size is even larger than the parent
+ * shift we should go up one level and check it again.
+ */
+ if (!amdgpu_vm_pt_ancestor(&cursor))
+ return -EINVAL;
+ continue;
+ }
+
+ pt = cursor.entry->bo;
+ if (!pt) {
+ /* We need all PDs and PTs for mapping something, */
+ if (flags & AMDGPU_PTE_VALID)
+ return -ENOENT;
+
+ /* but unmapping something can happen at a higher
+ * level.
+ */
+ if (!amdgpu_vm_pt_ancestor(&cursor))
+ return -EINVAL;
+
+ pt = cursor.entry->bo;
+ shift = parent_shift;
+ frag_end = max(frag_end, ALIGN(frag_start + 1,
+ 1ULL << shift));
+ }
+
+ /* Looks good so far, calculate parameters for the update */
+ incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
+ mask = amdgpu_vm_pt_entries_mask(adev, cursor.level);
+ pe_start = ((cursor.pfn >> shift) & mask) * 8;
+ entry_end = ((uint64_t)mask + 1) << shift;
+ entry_end += cursor.pfn & ~(entry_end - 1);
+ entry_end = min(entry_end, end);
+
+ do {
+ struct amdgpu_vm *vm = params->vm;
+ uint64_t upd_end = min(entry_end, frag_end);
+ unsigned int nptes = (upd_end - frag_start) >> shift;
+ uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);
+
+ /* This can happen when we set higher level PDs to
+ * silent to stop fault floods.
+ */
+ nptes = max(nptes, 1u);
+
+ trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
+ min(nptes, 32u), dst, incr,
+ upd_flags,
+ vm->task_info.pid,
+ vm->immediate.fence_context);
+ amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
+ cursor.level, pe_start, dst,
+ nptes, incr, upd_flags);
+
+ pe_start += nptes * 8;
+ dst += nptes * incr;
+
+ frag_start = upd_end;
+ if (frag_start >= frag_end) {
+ /* figure out the next fragment */
+ amdgpu_vm_pte_fragment(params, frag_start, end,
+ flags, &frag, &frag_end);
+ if (frag < shift)
+ break;
+ }
+ } while (frag_start < entry_end);
+
+ if (amdgpu_vm_pt_descendant(adev, &cursor)) {
+ /* Free all child entries.
+ * The tables were just updated with the flags and addresses, so any
+ * lower-level tables made redundant by huge pages or freed-up areas
+ * can be released. This is the maximum that can be freed: all other
+ * page tables are not completely covered by the range and so are
+ * potentially still in use.
+ */
+ while (cursor.pfn < frag_start) {
+ /* Make sure previous mapping is freed */
+ if (cursor.entry->bo) {
+ params->table_freed = true;
+ amdgpu_vm_pt_free_dfs(adev, params->vm,
+ &cursor);
+ }
+ amdgpu_vm_pt_next(adev, &cursor);
+ }
+
+ } else if (frag >= shift) {
+ /* or just move on to the next on the same level. */
+ amdgpu_vm_pt_next(adev, &cursor);
+ }
+ }
+
+ return 0;
+}
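
As a rough sketch, not verbatim kernel code, of how a caller drives the
function above: the prepare/update/commit sequence mirrors the one used by
amdgpu_vm_pt_clear() earlier in this file, and the range variables here are
illustrative placeholders.

	struct amdgpu_vm_update_params params;
	struct dma_fence *fence = NULL;
	int r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = false;

	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
	if (!r)
		r = amdgpu_vm_ptes_update(&params, start, end, dst, flags);
	if (!r)
		r = vm->update_funcs->commit(&params, &fence);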
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index bdb44cee19d3..1fd3cbca20a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -109,7 +109,7 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
if (p->unlocked) {
struct dma_fence *tmp = dma_fence_get(f);
- swap(p->vm->last_unlocked, f);
+ swap(p->vm->last_unlocked, tmp);
dma_fence_put(tmp);
} else {
amdgpu_bo_fence(p->vm->root.bo, f, true);
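
The fix restores the intended reference-counting dance: take an extra
reference on the new fence, swap it into vm->last_unlocked, then drop the
reference that now holds the old fence. Swapping f itself, as the old code
did, released the wrong object. In isolation:

	struct dma_fence *tmp = dma_fence_get(f); /* tmp holds the new fence */

	swap(p->vm->last_unlocked, tmp);          /* tmp now holds the old one */
	dma_fence_put(tmp);                       /* release the old fence */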
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 7326b6c1b71c..e78e4c27b62a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -1,34 +1,33 @@
/*
- * Copyright 2018-2019 Advanced Micro Devices, Inc.
+ * Copyright (c) 2018-2021 Advanced Micro Devices, Inc. All rights reserved.
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
*/
#ifndef AMDGV_SRIOV_MSG__H_
#define AMDGV_SRIOV_MSG__H_
/* unit in kilobytes */
-#define AMD_SRIOV_MSG_VBIOS_OFFSET 0
-#define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64
-#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB
-#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4
+#define AMD_SRIOV_MSG_VBIOS_OFFSET 0
+#define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64
+#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB
+#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4
/*
* layout
@@ -51,10 +50,10 @@
* v2 defined in amdgim
* v3 current
*/
-#define AMD_SRIOV_MSG_FW_VRAM_PF2VF_VER 2
-#define AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER 3
+#define AMD_SRIOV_MSG_FW_VRAM_PF2VF_VER 2
+#define AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER 3
-#define AMD_SRIOV_MSG_RESERVE_UCODE 24
+#define AMD_SRIOV_MSG_RESERVE_UCODE 24
#define AMD_SRIOV_MSG_RESERVE_VCN_INST 4
@@ -83,19 +82,19 @@ enum amd_sriov_ucode_engine_id {
AMD_SRIOV_UCODE_ID__MAX
};
-#pragma pack(push, 1) // PF2VF / VF2PF data areas are byte packed
+#pragma pack(push, 1) // PF2VF / VF2PF data areas are byte packed
union amd_sriov_msg_feature_flags {
struct {
- uint32_t error_log_collect : 1;
- uint32_t host_load_ucodes : 1;
- uint32_t host_flr_vramlost : 1;
- uint32_t mm_bw_management : 1;
- uint32_t pp_one_vf_mode : 1;
- uint32_t reg_indirect_acc : 1;
- uint32_t reserved : 26;
+ uint32_t error_log_collect : 1;
+ uint32_t host_load_ucodes : 1;
+ uint32_t host_flr_vramlost : 1;
+ uint32_t mm_bw_management : 1;
+ uint32_t pp_one_vf_mode : 1;
+ uint32_t reg_indirect_acc : 1;
+ uint32_t reserved : 26;
} flags;
- uint32_t all;
+ uint32_t all;
};
union amd_sriov_reg_access_flags {
@@ -110,10 +109,10 @@ union amd_sriov_reg_access_flags {
union amd_sriov_msg_os_info {
struct {
- uint32_t windows : 1;
- uint32_t reserved : 31;
+ uint32_t windows : 1;
+ uint32_t reserved : 31;
} info;
- uint32_t all;
+ uint32_t all;
};
struct amd_sriov_msg_uuid_info {
@@ -156,6 +155,7 @@ struct amd_sriov_msg_pf2vf_info_header {
uint32_t reserved[2];
};
+#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (48)
struct amd_sriov_msg_pf2vf_info {
/* header contains size and version */
struct amd_sriov_msg_pf2vf_info_header header;
@@ -204,10 +204,10 @@ struct amd_sriov_msg_pf2vf_info {
} mm_bw_management[AMD_SRIOV_MSG_RESERVE_VCN_INST];
/* UUID info */
struct amd_sriov_msg_uuid_info uuid_info;
- /* pcie atomic Ops info */
- uint32_t pcie_atomic_ops_enabled_flags;
+ /* PCIE atomic ops support flag */
+ uint32_t pcie_atomic_ops_support_flags;
/* reserved */
- uint32_t reserved[256 - 48];
+ uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE];
};
struct amd_sriov_msg_vf2pf_info_header {
@@ -219,12 +219,13 @@ struct amd_sriov_msg_vf2pf_info_header {
uint32_t reserved[2];
};
+#define AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE (70)
struct amd_sriov_msg_vf2pf_info {
/* header contains size and version */
struct amd_sriov_msg_vf2pf_info_header header;
uint32_t checksum;
/* driver version */
- uint8_t driver_version[64];
+ uint8_t driver_version[64];
/* driver certification, 1=WHQL, 0=None */
uint32_t driver_cert;
/* guest OS type and version */
@@ -258,13 +259,13 @@ struct amd_sriov_msg_vf2pf_info {
uint32_t fb_size;
/* guest ucode data, each one is 1.25 Dword */
struct {
- uint8_t id;
+ uint8_t id;
uint32_t version;
} ucode_info[AMD_SRIOV_MSG_RESERVE_UCODE];
uint64_t dummy_page_addr;
/* reserved */
- uint32_t reserved[256-70];
+ uint32_t reserved[256 - AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE];
};
/* mailbox message send from guest to host */
@@ -276,7 +277,7 @@ enum amd_sriov_mailbox_request_message {
MB_REQ_MSG_REQ_GPU_RESET_ACCESS,
MB_REQ_MSG_REQ_GPU_INIT_DATA,
- MB_REQ_MSG_LOG_VF_ERROR = 200,
+ MB_REQ_MSG_LOG_VF_ERROR = 200,
};
/* mailbox message send from host to guest */
@@ -298,17 +299,15 @@ enum amd_sriov_gpu_init_data_version {
GPU_INIT_DATA_READY_V1 = 1,
};
-#pragma pack(pop) // Restore previous packing option
+#pragma pack(pop) // Restore previous packing option
/* checksum function between host and guest */
-unsigned int amd_sriov_msg_checksum(void *obj,
- unsigned long obj_size,
- unsigned int key,
- unsigned int checksum);
+unsigned int amd_sriov_msg_checksum(void *obj, unsigned long obj_size, unsigned int key,
+ unsigned int checksum);
/* assertion at compile time */
#ifdef __linux__
-#define stringification(s) _stringification(s)
+#define stringification(s) _stringification(s)
#define _stringification(s) #s
_Static_assert(
@@ -319,13 +318,11 @@ _Static_assert(
sizeof(struct amd_sriov_msg_pf2vf_info) == AMD_SRIOV_MSG_SIZE_KB << 10,
"amd_sriov_msg_pf2vf_info must be " stringification(AMD_SRIOV_MSG_SIZE_KB) " KB");
-_Static_assert(
- AMD_SRIOV_MSG_RESERVE_UCODE % 4 == 0,
- "AMD_SRIOV_MSG_RESERVE_UCODE must be multiple of 4");
+_Static_assert(AMD_SRIOV_MSG_RESERVE_UCODE % 4 == 0,
+ "AMD_SRIOV_MSG_RESERVE_UCODE must be multiple of 4");
-_Static_assert(
- AMD_SRIOV_MSG_RESERVE_UCODE > AMD_SRIOV_UCODE_ID__MAX,
- "AMD_SRIOV_MSG_RESERVE_UCODE must be bigger than AMD_SRIOV_UCODE_ID__MAX");
+_Static_assert(AMD_SRIOV_MSG_RESERVE_UCODE > AMD_SRIOV_UCODE_ID__MAX,
+ "AMD_SRIOV_MSG_RESERVE_UCODE must be bigger than AMD_SRIOV_UCODE_ID__MAX");
#undef _stringification
#undef stringification
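
The two-level macro above is the standard C stringification idiom: the
outer macro forces its argument to be macro-expanded before the inner one
applies the # operator. A minimal sketch (EXAMPLE_KB and its value are
illustrative, not part of this header):

	#define stringification(s)  _stringification(s)
	#define _stringification(s) #s

	#define EXAMPLE_KB 4
	/* expands to "4"; without the indirection it would be "EXAMPLE_KB" */
	static const char example[] = stringification(EXAMPLE_KB);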
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
index 88642e7ecdf4..a13c443ea10f 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
@@ -87,7 +87,7 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
return 0;
}
-void athub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+void athub_v1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.h
index b279af59e34f..6be0a6704ea7 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.h
@@ -25,6 +25,6 @@
int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state);
-void athub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+void athub_v1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
index a720436857b4..a9521c98e7f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
@@ -93,7 +93,7 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev,
return 0;
}
-void athub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+void athub_v2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.h
index 02932c1c8bab..8b763f6dfd81 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.h
@@ -25,6 +25,6 @@
int athub_v2_0_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state);
-void athub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+void athub_v2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
index ad8e87d3d2cb..78508ae6a670 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
@@ -85,7 +85,7 @@ int athub_v2_1_set_clockgating(struct amdgpu_device *adev,
return 0;
}
-void athub_v2_1_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+void athub_v2_1_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.h b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.h
index 5e6824c0f591..b799f14bce03 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.h
@@ -25,6 +25,6 @@
int athub_v2_1_set_clockgating(struct amdgpu_device *adev,
enum amd_clockgating_state state);
-void athub_v2_1_get_clockgating(struct amdgpu_device *adev, u32 *flags);
+void athub_v2_1_get_clockgating(struct amdgpu_device *adev, u64 *flags);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index a92d86e12718..d4f5a584075d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -765,7 +765,6 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
int dp_clock = 0;
int dp_lane_count = 0;
int connector_object_id = 0;
- int igp_lane_info = 0;
int dig_encoder = dig->dig_encoder;
int hpd_id = AMDGPU_HPD_NONE;
@@ -848,26 +847,6 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
- if ((adev->flags & AMD_IS_APU) &&
- (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
- if (is_dp ||
- !amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) {
- if (igp_lane_info & 0x1)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- else if (igp_lane_info & 0x2)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
- else if (igp_lane_info & 0x4)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
- else if (igp_lane_info & 0x8)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
- } else {
- if (igp_lane_info & 0x3)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
- else if (igp_lane_info & 0xc)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
- }
- }
-
if (dig->linkb)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
index 2d01ac0d4c11..b991609f46c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
@@ -99,7 +99,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
}
static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev,
- u32 *flags)
+ u64 *flags)
{
u32 tmp;
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index f4dfca013ec5..483a441b46aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -332,7 +332,7 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
}
static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
- u32 *flags)
+ u64 *flags)
{
u32 tmp;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 9426e252d8aa..54446162db8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4741,7 +4741,7 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
- AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ AMDGPU_RING_PRIO_2 : AMDGPU_RING_PRIO_DEFAULT;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
hw_prio, NULL);
@@ -8451,7 +8451,7 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
return 0;
}
-static void gfx_v10_0_get_clockgating_state(void *handle, u32 *flags)
+static void gfx_v10_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 5f112efda634..25dc729d0ec2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1925,7 +1925,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
+ ring->pipe;
hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
- AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
+ AMDGPU_RING_PRIO_2 : AMDGPU_RING_PRIO_DEFAULT;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
hw_prio, NULL);
@@ -5475,7 +5475,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
return 0;
}
-static void gfx_v8_0_get_clockgating_state(void *handle, u32 *flags)
+static void gfx_v8_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 46d4bf27ebbb..d58fd83524ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1205,6 +1205,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
{ 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
/* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
+ /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
+ { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
{ 0, 0, 0, 0, 0 },
};
@@ -2274,7 +2276,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
- AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ AMDGPU_RING_PRIO_2 : AMDGPU_RING_PRIO_DEFAULT;
/* type-2 packets are deprecated on MEC, use type-3 instead */
return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
hw_prio, NULL);
@@ -5231,7 +5233,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
return 0;
}
-static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
+static void gfx_v9_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index 7653ebd0e67b..3a797424579c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -1930,6 +1930,19 @@ static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev)
mutex_unlock(&adev->grbm_idx_mutex);
}
+static bool gfx_v9_4_2_query_uctl2_poison_status(struct amdgpu_device *adev)
+{
+ u32 status = 0;
+ struct amdgpu_vmhub *hub;
+
+ hub = &adev->vmhub[AMDGPU_GFXHUB_0];
+ status = RREG32(hub->vm_l2_pro_fault_status);
+ /* reset page fault status */
+ WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
+
+ return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED);
+}
+
struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops = {
.ras_error_inject = &gfx_v9_4_2_ras_error_inject,
.query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
@@ -1943,4 +1956,5 @@ struct amdgpu_gfx_ras gfx_v9_4_2_ras = {
.hw_ops = &gfx_v9_4_2_ras_ops,
},
.enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
+ .query_utcl2_poison_status = gfx_v9_4_2_query_uctl2_poison_status,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 5228421b0f72..20946bc7fc93 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -1151,6 +1151,16 @@ static int gmc_v10_0_set_clockgating_state(void *handle,
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /*
+ * MMHUB being unable to disconnect from DF while MMHUB clock gating is
+ * disabled is a new problem observed on DF 3.0.3; the same suspend
+ * sequence has not shown any issue on the DF 3.0.2 series platforms.
+ */
+ if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) {
+ dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n");
+ return 0;
+ }
+
r = adev->mmhub.funcs->set_clockgating(adev, state);
if (r)
return r;
@@ -1161,7 +1171,7 @@ static int gmc_v10_0_set_clockgating_state(void *handle,
return athub_v2_0_set_clockgating(adev, state);
}
-static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
+static void gmc_v10_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1932a3e4af7e..382dde1ce74c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1690,7 +1690,7 @@ static int gmc_v8_0_set_powergating_state(void *handle,
return 0;
}
-static void gmc_v8_0_get_clockgating_state(void *handle, u32 *flags)
+static void gmc_v8_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 6009fbfdcc19..22761a3bb818 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1948,7 +1948,7 @@ static int gmc_v9_0_set_clockgating_state(void *handle,
return 0;
}
-static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
+static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index 046216635262..adf89680f53e 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -124,7 +124,7 @@ static void hdp_v4_0_update_clock_gating(struct amdgpu_device *adev,
}
static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev,
- u32 *flags)
+ u64 *flags)
{
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
index 5793977953cc..a9ea23fa0def 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c
@@ -181,7 +181,7 @@ static void hdp_v5_0_update_clock_gating(struct amdgpu_device *adev,
}
static void hdp_v5_0_get_clockgating_state(struct amdgpu_device *adev,
- u32 *flags)
+ u64 *flags)
{
uint32_t tmp;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index a29c86617fb5..8c3227d0b8b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -26,6 +26,7 @@
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"
+#include "jpeg_v2_5.h"
#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
@@ -39,6 +40,7 @@ static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v2_5_set_powergating_state(void *handle,
enum amd_powergating_state state);
+static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev);
static int amdgpu_ih_clientid_jpeg[] = {
SOC15_IH_CLIENTID_VCN,
@@ -70,6 +72,7 @@ static int jpeg_v2_5_early_init(void *handle)
jpeg_v2_5_set_dec_ring_funcs(adev);
jpeg_v2_5_set_irq_funcs(adev);
+ jpeg_v2_5_set_ras_funcs(adev);
return 0;
}
@@ -730,3 +733,74 @@ const struct amdgpu_ip_block_version jpeg_v2_6_ip_block =
.rev = 0,
.funcs = &jpeg_v2_6_ip_funcs,
};
+
+static uint32_t jpeg_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
+ uint32_t instance, uint32_t sub_block)
+{
+ uint32_t poison_stat = 0, reg_value = 0;
+
+ switch (sub_block) {
+ case AMDGPU_JPEG_V2_6_JPEG0:
+ reg_value = RREG32_SOC15(JPEG, instance, mmUVD_RAS_JPEG0_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG0_STATUS, POISONED_PF);
+ break;
+ case AMDGPU_JPEG_V2_6_JPEG1:
+ reg_value = RREG32_SOC15(JPEG, instance, mmUVD_RAS_JPEG1_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_JPEG1_STATUS, POISONED_PF);
+ break;
+ default:
+ break;
+ }
+
+ if (poison_stat)
+ dev_info(adev->dev, "Poison detected in JPEG%d sub_block%d\n",
+ instance, sub_block);
+
+ return poison_stat;
+}
+
+static bool jpeg_v2_6_query_ras_poison_status(struct amdgpu_device *adev)
+{
+ uint32_t inst = 0, sub = 0, poison_stat = 0;
+
+ for (inst = 0; inst < adev->jpeg.num_jpeg_inst; inst++)
+ for (sub = 0; sub < AMDGPU_JPEG_V2_6_MAX_SUB_BLOCK; sub++)
+ poison_stat +=
+ jpeg_v2_6_query_poison_by_instance(adev, inst, sub);
+
+ return !!poison_stat;
+}
+
+const struct amdgpu_ras_block_hw_ops jpeg_v2_6_ras_hw_ops = {
+ .query_poison_status = jpeg_v2_6_query_ras_poison_status,
+};
+
+static struct amdgpu_jpeg_ras jpeg_v2_6_ras = {
+ .ras_block = {
+ .hw_ops = &jpeg_v2_6_ras_hw_ops,
+ },
+};
+
+static void jpeg_v2_5_set_ras_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[JPEG_HWIP][0]) {
+ case IP_VERSION(2, 6, 0):
+ adev->jpeg.ras = &jpeg_v2_6_ras;
+ break;
+ default:
+ break;
+ }
+
+ if (adev->jpeg.ras) {
+ amdgpu_ras_register_ras_block(adev, &adev->jpeg.ras->ras_block);
+
+ strcpy(adev->jpeg.ras->ras_block.ras_comm.name, "jpeg");
+ adev->jpeg.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
+ adev->jpeg.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
+ adev->jpeg.ras_if = &adev->jpeg.ras->ras_block.ras_comm;
+
+ /* If no special ras_late_init function is defined, use the default ras_late_init */
+ if (!adev->jpeg.ras->ras_block.ras_late_init)
+ adev->jpeg.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h
index 3b0aa29b9879..1e858c6cdf13 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h
@@ -24,6 +24,13 @@
#ifndef __JPEG_V2_5_H__
#define __JPEG_V2_5_H__
+enum amdgpu_jpeg_v2_6_sub_block {
+ AMDGPU_JPEG_V2_6_JPEG0 = 0,
+ AMDGPU_JPEG_V2_6_JPEG1,
+
+ AMDGPU_JPEG_V2_6_MAX_SUB_BLOCK,
+};
+
extern const struct amdgpu_ip_block_version jpeg_v2_5_ip_block;
extern const struct amdgpu_ip_block_version jpeg_v2_6_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 4c9f0c0f3116..3f44a099c52a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -546,7 +546,7 @@ static int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
return 0;
}
-static void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
int data, data1;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
index 3b901f941627..6fa7090bc6cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -542,7 +542,7 @@ static int mmhub_v1_7_set_clockgating(struct amdgpu_device *adev,
return 0;
}
-static void mmhub_v1_7_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v1_7_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
int data, data1;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 3718ff610ab2..636abd855686 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -682,7 +682,7 @@ static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
return 0;
}
-static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
int data, data1;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index 1957fb098c4d..ff44c5364a8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -577,7 +577,7 @@ static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
return 0;
}
-static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
int data, data1, data2, data3;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 619106f7d23d..6e0145b2b408 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -647,7 +647,7 @@ static int mmhub_v9_4_set_clockgating(struct amdgpu_device *adev,
return 0;
}
-static void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+static void mmhub_v9_4_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
int data, data1;
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 8ce5b8ca1fd7..97201ab0965e 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -685,7 +685,7 @@ static int navi10_ih_set_powergating_state(void *handle,
return 0;
}
-static void navi10_ih_get_clockgating_state(void *handle, u32 *flags)
+static void navi10_ih_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index ee7cab37dfd5..6cd1fb2eb913 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -278,7 +278,7 @@ static void nbio_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev
}
static void nbio_v2_3_get_clockgating_state(struct amdgpu_device *adev,
- u32 *flags)
+ u64 *flags)
{
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 4bbacf1be25a..f7f6ddebd3e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -210,7 +210,7 @@ static void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev
}
static void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev,
- u32 *flags)
+ u64 *flags)
{
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index 37a4039fdfc5..aa0326d00c72 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -205,7 +205,7 @@ static void nbio_v7_0_update_medium_grain_light_sleep(struct amdgpu_device *adev
}
static void nbio_v7_0_get_clockgating_state(struct amdgpu_device *adev,
- u32 *flags)
+ u64 *flags)
{
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
index 6f81de6f3cc4..31776b12e4c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
@@ -306,7 +306,7 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
}
static void nbio_v7_2_get_clockgating_state(struct amdgpu_device *adev,
- u32 *flags)
+ u64 *flags)
{
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index c2357e83a8c4..4531761dcf77 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -273,7 +273,7 @@ static void nbio_v7_4_update_medium_grain_light_sleep(struct amdgpu_device *adev
}
static void nbio_v7_4_get_clockgating_state(struct amdgpu_device *adev,
- u32 *flags)
+ u64 *flags)
{
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index e19f14c3ef59..0a7946c59a42 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -1115,7 +1115,7 @@ static int nv_common_set_powergating_state(void *handle,
return 0;
}
-static void nv_common_get_clockgating_state(void *handle, u32 *flags)
+static void nv_common_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 4ef4feff5649..3695374896ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1535,7 +1535,7 @@ static int sdma_v3_0_set_powergating_state(void *handle,
return 0;
}
-static void sdma_v3_0_get_clockgating_state(void *handle, u32 *flags)
+static void sdma_v3_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index d7e8f7232364..8589ab1c9800 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2372,7 +2372,7 @@ static int sdma_v4_0_set_powergating_state(void *handle,
return 0;
}
-static void sdma_v4_0_get_clockgating_state(void *handle, u32 *flags)
+static void sdma_v4_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index a8d49c005f73..775aabde1ae2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1648,7 +1648,7 @@ static int sdma_v5_0_set_powergating_state(void *handle,
return 0;
}
-static void sdma_v5_0_get_clockgating_state(void *handle, u32 *flags)
+static void sdma_v5_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 824eace69884..ca50857b982d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1645,7 +1645,7 @@ static int sdma_v5_2_set_powergating_state(void *handle,
return 0;
}
-static void sdma_v5_2_get_clockgating_state(void *handle, u32 *flags)
+static void sdma_v5_2_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c
index b6f1322f908c..acdc40f99ab3 100644
--- a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c
@@ -59,7 +59,7 @@ static void smuio_v11_0_update_rom_clock_gating(struct amdgpu_device *adev, bool
WREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0, data);
}
-static void smuio_v11_0_get_clock_gating_state(struct amdgpu_device *adev, u32 *flags)
+static void smuio_v11_0_get_clock_gating_state(struct amdgpu_device *adev, u64 *flags)
{
u32 data;
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c
index 3a18dbb55c32..2afeb8b37f62 100644
--- a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c
@@ -56,7 +56,7 @@ static void smuio_v11_0_6_update_rom_clock_gating(struct amdgpu_device *adev, bo
WREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0, data);
}
-static void smuio_v11_0_6_get_clock_gating_state(struct amdgpu_device *adev, u32 *flags)
+static void smuio_v11_0_6_get_clock_gating_state(struct amdgpu_device *adev, u64 *flags)
{
u32 data;
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c
index 39b7c206770f..13e905c22592 100644
--- a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c
@@ -58,7 +58,7 @@ static void smuio_v13_0_update_rom_clock_gating(struct amdgpu_device *adev, bool
WREG32_SOC15(SMUIO, 0, regCGTT_ROM_CLK_CTRL0, data);
}
-static void smuio_v13_0_get_clock_gating_state(struct amdgpu_device *adev, u32 *flags)
+static void smuio_v13_0_get_clock_gating_state(struct amdgpu_device *adev, u64 *flags)
{
u32 data;
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c b/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c
index 8417890af227..e4e30b9d481b 100644
--- a/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c
@@ -56,7 +56,7 @@ static void smuio_v9_0_update_rom_clock_gating(struct amdgpu_device *adev, bool
WREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0, data);
}
-static void smuio_v9_0_get_clock_gating_state(struct amdgpu_device *adev, u32 *flags)
+static void smuio_v9_0_get_clock_gating_state(struct amdgpu_device *adev, u64 *flags)
{
u32 data;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 3d0251ef8d79..3ee7322081d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1419,7 +1419,7 @@ static int soc15_common_set_clockgating_state(void *handle,
return 0;
}
-static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
+static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index c45d9c14ecbc..606892dbea1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -64,21 +64,62 @@ static inline uint32_t get_umc_v6_7_channel_index(struct amdgpu_device *adev,
return adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
}
+static void umc_v6_7_query_error_status_helper(struct amdgpu_device *adev,
+ uint64_t mc_umc_status, uint32_t umc_reg_offset)
+{
+ uint32_t mc_umc_addr;
+ uint64_t reg_value;
+
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)
+ dev_info(adev->dev, "Deferred error, no user action is needed.\n");
+
+ if (mc_umc_status)
+ dev_info(adev->dev, "MCA STATUS 0x%llx, umc_reg_offset 0x%x\n", mc_umc_status, umc_reg_offset);
+
+ /* print IPID registers value */
+ mc_umc_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_IPIDT0);
+ reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
+ if (reg_value)
+ dev_info(adev->dev, "MCA IPID 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
+
+ /* print SYND registers value */
+ mc_umc_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_SYNDT0);
+ reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
+ if (reg_value)
+ dev_info(adev->dev, "MCA SYND 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
+
+ /* print MISC0 registers value */
+ mc_umc_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_MISC0T0);
+ reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
+ if (reg_value)
+ dev_info(adev->dev, "MCA MISC0 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
+}
+
static void umc_v6_7_ecc_info_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t umc_inst, uint32_t ch_inst,
unsigned long *error_count)
{
uint64_t mc_umc_status;
uint32_t eccinfo_table_idx;
+ uint32_t umc_reg_offset;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+ umc_inst, ch_inst);
+
eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
/* check for SRAM correctable error
MCUMC_STATUS is a 64 bit register */
mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) {
*error_count += 1;
+
+ umc_v6_7_query_error_status_helper(adev, mc_umc_status, umc_reg_offset);
+ }
}
static void umc_v6_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_device *adev,
@@ -88,8 +129,6 @@ static void umc_v6_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_dev
uint64_t mc_umc_status;
uint32_t eccinfo_table_idx;
uint32_t umc_reg_offset;
- uint32_t mc_umc_addr;
- uint64_t reg_value;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
umc_reg_offset = get_umc_v6_7_reg_offset(adev,
@@ -106,32 +145,7 @@ static void umc_v6_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_dev
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) {
*error_count += 1;
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)
- dev_info(adev->dev, "Deferred error, no user action is needed.\n");
-
- if (mc_umc_status)
- dev_info(adev->dev, "MCA STATUS 0x%llx, umc_reg_offset 0x%x\n", mc_umc_status, umc_reg_offset);
-
- /* print IPID registers value */
- mc_umc_addr =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_IPIDT0);
- reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
- if (reg_value)
- dev_info(adev->dev, "MCA IPID 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
-
- /* print SYND registers value */
- mc_umc_addr =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_SYNDT0);
- reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
- if (reg_value)
- dev_info(adev->dev, "MCA SYND 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
-
- /* print MISC0 registers value */
- mc_umc_addr =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_MISC0T0);
- reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
- if (reg_value)
- dev_info(adev->dev, "MCA MISC0 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
+ umc_v6_7_query_error_status_helper(adev, mc_umc_status, umc_reg_offset);
}
}
@@ -277,8 +291,11 @@ static void umc_v6_7_query_correctable_error_count(struct amdgpu_device *adev,
MCUMC_STATUS is a 64 bit register */
mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
- REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) {
*error_count += 1;
+
+ umc_v6_7_query_error_status_helper(adev, mc_umc_status, umc_reg_offset);
+ }
}
static void umc_v6_7_querry_uncorrectable_error_count(struct amdgpu_device *adev,
@@ -287,8 +304,6 @@ static void umc_v6_7_querry_uncorrectable_error_count(struct amdgpu_device *adev
{
uint64_t mc_umc_status;
uint32_t mc_umc_status_addr;
- uint32_t mc_umc_addr;
- uint64_t reg_value;
mc_umc_status_addr =
SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
@@ -303,32 +318,7 @@ static void umc_v6_7_querry_uncorrectable_error_count(struct amdgpu_device *adev
REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) {
*error_count += 1;
- if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)
- dev_info(adev->dev, "Deferred error, no user action is needed.\n");
-
- if (mc_umc_status)
- dev_info(adev->dev, "MCA STATUS 0x%llx, umc_reg_offset 0x%x\n", mc_umc_status, umc_reg_offset);
-
- /* print IPID registers value */
- mc_umc_addr =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_IPIDT0);
- reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
- if (reg_value)
- dev_info(adev->dev, "MCA IPID 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
-
- /* print SYND registers value */
- mc_umc_addr =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_SYNDT0);
- reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
- if (reg_value)
- dev_info(adev->dev, "MCA SYND 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
-
- /* print MISC0 registers value */
- mc_umc_addr =
- SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_MISC0T0);
- reg_value = RREG64_PCIE((mc_umc_addr + umc_reg_offset) * 4);
- if (reg_value)
- dev_info(adev->dev, "MCA MISC0 0x%llx, umc_reg_offset 0x%x\n", reg_value, umc_reg_offset);
+ umc_v6_7_query_error_status_helper(adev, mc_umc_status, umc_reg_offset);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 563493d1f830..d7e31e48a2b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -833,7 +833,7 @@ out:
return ret;
}
-static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)
+static void uvd_v5_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 2d558c2f417d..375c440957dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -1494,7 +1494,7 @@ out:
return ret;
}
-static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
+static void uvd_v6_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 142e291983b4..8def62c83ffd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -831,7 +831,7 @@ out:
return ret;
}
-static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags)
+static void vce_v3_0_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 1bf672966a62..17d44be58877 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -31,6 +31,7 @@
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v1_0.h"
+#include "vcn_v2_5.h"
#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
@@ -59,6 +60,7 @@ static int vcn_v2_5_set_powergating_state(void *handle,
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
+static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
@@ -100,6 +102,7 @@ static int vcn_v2_5_early_init(void *handle)
vcn_v2_5_set_dec_ring_funcs(adev);
vcn_v2_5_set_enc_ring_funcs(adev);
vcn_v2_5_set_irq_funcs(adev);
+ vcn_v2_5_set_ras_funcs(adev);
return 0;
}
@@ -1932,3 +1935,71 @@ const struct amdgpu_ip_block_version vcn_v2_6_ip_block =
.rev = 0,
.funcs = &vcn_v2_6_ip_funcs,
};
+
+static uint32_t vcn_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
+ uint32_t instance, uint32_t sub_block)
+{
+ uint32_t poison_stat = 0, reg_value = 0;
+
+ switch (sub_block) {
+ case AMDGPU_VCN_V2_6_VCPU_VCODEC:
+ reg_value = RREG32_SOC15(VCN, instance, mmUVD_RAS_VCPU_VCODEC_STATUS);
+ poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
+ break;
+ default:
+ break;
+ }
+
+ if (poison_stat)
+ dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
+ instance, sub_block);
+
+ return poison_stat;
+}
+
+static bool vcn_v2_6_query_poison_status(struct amdgpu_device *adev)
+{
+ uint32_t inst, sub;
+ uint32_t poison_stat = 0;
+
+ for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
+ for (sub = 0; sub < AMDGPU_VCN_V2_6_MAX_SUB_BLOCK; sub++)
+ poison_stat +=
+ vcn_v2_6_query_poison_by_instance(adev, inst, sub);
+
+ return !!poison_stat;
+}
+
+const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
+ .query_poison_status = vcn_v2_6_query_poison_status,
+};
+
+static struct amdgpu_vcn_ras vcn_v2_6_ras = {
+ .ras_block = {
+ .hw_ops = &vcn_v2_6_ras_hw_ops,
+ },
+};
+
+static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[VCN_HWIP][0]) {
+ case IP_VERSION(2, 6, 0):
+ adev->vcn.ras = &vcn_v2_6_ras;
+ break;
+ default:
+ break;
+ }
+
+ if (adev->vcn.ras) {
+ amdgpu_ras_register_ras_block(adev, &adev->vcn.ras->ras_block);
+
+ strcpy(adev->vcn.ras->ras_block.ras_comm.name, "vcn");
+ adev->vcn.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
+ adev->vcn.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
+ adev->vcn.ras_if = &adev->vcn.ras->ras_block.ras_comm;
+
+ /* If the block doesn't define its own ras_late_init, use the default one */
+ if (!adev->vcn.ras->ras_block.ras_late_init)
+ adev->vcn.ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+ }
+}
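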
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h
index e72f799ed0fd..1c19af74e4fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h
@@ -24,6 +24,12 @@
#ifndef __VCN_V2_5_H__
#define __VCN_V2_5_H__
+enum amdgpu_vcn_v2_6_sub_block {
+ AMDGPU_VCN_V2_6_VCPU_VCODEC = 0,
+
+ AMDGPU_VCN_V2_6_MAX_SUB_BLOCK,
+};
+
extern const struct amdgpu_ip_block_version vcn_v2_5_ip_block;
extern const struct amdgpu_ip_block_version vcn_v2_6_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 039b90cdc3bc..c5b88d15a6df 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -2033,7 +2033,7 @@ static int vi_common_set_powergating_state(void *handle,
return 0;
}
-static void vi_common_get_clockgating_state(void *handle, u32 *flags)
+static void vi_common_get_clockgating_state(void *handle, u64 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int data;
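
This is the same one-line signature change repeated across uvd_v5_0, uvd_v6_0, vce_v3_0 and vi: the clockgating-flags out-parameter widens from u32 to u64. A plausible motivation — an assumption here, not stated in the diff — is that the set of AMD_CG_SUPPORT_* feature bits has outgrown 32; the sketch below shows why bit positions of 32 and above need the wider type (CG_FEATURE() and the bit numbers are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define CG_FEATURE(n) (1ULL << (n))

    static void get_clockgating_state(void *handle, uint64_t *flags)
    {
        (void)handle;
        /* bit 40 would silently truncate to zero in a 32-bit flags word */
        *flags = CG_FEATURE(3) | CG_FEATURE(40);
    }

    int main(void)
    {
        uint64_t f;
        get_clockgating_state(NULL, &f);
        printf("flags=%#llx\n", (unsigned long long)f);
        return 0;
    }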
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 607f65ab39ac..ee8b288dd8cc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1128,14 +1128,6 @@ err_pdd:
return ret;
}
-static bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
-{
- return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
- (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) &&
- dev->adev->sdma.instance[0].fw_version >= 18) ||
- KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
-}
-
static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -1146,7 +1138,6 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
long err = 0;
int i;
uint32_t *devices_arr = NULL;
- bool table_freed = false;
if (!args->n_devices) {
pr_debug("Device IDs array empty\n");
@@ -1208,7 +1199,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
peer_pdd->dev->adev, (struct kgd_mem *)mem,
- peer_pdd->drm_priv, &table_freed);
+ peer_pdd->drm_priv);
if (err) {
struct pci_dev *pdev = peer_pdd->dev->adev->pdev;
@@ -1233,13 +1224,11 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
}
/* Flush TLBs after waiting for the page table updates to complete */
- if (table_freed || !kfd_flush_tlb_after_unmap(dev)) {
- for (i = 0; i < args->n_devices; i++) {
- peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
- if (WARN_ON_ONCE(!peer_pdd))
- continue;
- kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
- }
+ for (i = 0; i < args->n_devices; i++) {
+ peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
+ if (WARN_ON_ONCE(!peer_pdd))
+ continue;
+ kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
}
kfree(devices_arr);
@@ -2206,8 +2195,8 @@ static int criu_restore_bo(struct kfd_process *p,
if (IS_ERR(peer_pdd))
return PTR_ERR(peer_pdd);
- ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem, peer_pdd->drm_priv,
- NULL);
+ ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem,
+ peer_pdd->drm_priv);
if (ret) {
pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds);
return ret;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 1eaabd2cb41b..afc8a7fcdad8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1056,7 +1056,7 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
* table, add corresponded reversed direction link now.
*/
if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
- to_dev = kfd_topology_device_by_proximity_domain(id_to);
+ to_dev = kfd_topology_device_by_proximity_domain_no_lock(id_to);
if (!to_dev)
return -ENODEV;
/* same everything but the other direction */
@@ -2225,7 +2225,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
*/
if (kdev->hive_id) {
for (nid = 0; nid < proximity_domain; ++nid) {
- peer_dev = kfd_topology_device_by_proximity_domain(nid);
+ peer_dev = kfd_topology_device_by_proximity_domain_no_lock(nid);
if (!peer_dev->gpu)
continue;
if (peer_dev->gpu->hive_id != kdev->hive_id)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index acf4f7975850..198672264492 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -130,19 +130,33 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
}
static void increment_queue_count(struct device_queue_manager *dqm,
- enum kfd_queue_type type)
+ struct qcm_process_device *qpd,
+ struct queue *q)
{
dqm->active_queue_count++;
- if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_DIQ)
dqm->active_cp_queue_count++;
+
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count++;
+ qpd->mapped_gws_queue = true;
+ }
}
static void decrement_queue_count(struct device_queue_manager *dqm,
- enum kfd_queue_type type)
+ struct qcm_process_device *qpd,
+ struct queue *q)
{
dqm->active_queue_count--;
- if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+ q->properties.type == KFD_QUEUE_TYPE_DIQ)
dqm->active_cp_queue_count--;
+
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count--;
+ qpd->mapped_gws_queue = false;
+ }
}
/*
@@ -412,7 +426,7 @@ add_queue_to_list:
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active)
- increment_queue_count(dqm, q->properties.type);
+ increment_queue_count(dqm, qpd, q);
/*
* Unconditionally increment this counter, regardless of the queue's
@@ -601,13 +615,8 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
deallocate_vmid(dqm, qpd, q);
}
qpd->queue_count--;
- if (q->properties.is_active) {
- decrement_queue_count(dqm, q->properties.type);
- if (q->properties.is_gws) {
- dqm->gws_queue_count--;
- qpd->mapped_gws_queue = false;
- }
- }
+ if (q->properties.is_active)
+ decrement_queue_count(dqm, qpd, q);
return retval;
}
@@ -700,12 +709,11 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
* dqm->active_queue_count to determine whether a new runlist must be
* uploaded.
*/
- if (q->properties.is_active && !prev_active)
- increment_queue_count(dqm, q->properties.type);
- else if (!q->properties.is_active && prev_active)
- decrement_queue_count(dqm, q->properties.type);
-
- if (q->gws && !q->properties.is_gws) {
+ if (q->properties.is_active && !prev_active) {
+ increment_queue_count(dqm, &pdd->qpd, q);
+ } else if (!q->properties.is_active && prev_active) {
+ decrement_queue_count(dqm, &pdd->qpd, q);
+ } else if (q->gws && !q->properties.is_gws) {
if (q->properties.is_active) {
dqm->gws_queue_count++;
pdd->qpd.mapped_gws_queue = true;
@@ -767,11 +775,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = false;
- decrement_queue_count(dqm, q->properties.type);
- if (q->properties.is_gws) {
- dqm->gws_queue_count--;
- qpd->mapped_gws_queue = false;
- }
+ decrement_queue_count(dqm, qpd, q);
if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
continue;
@@ -817,7 +821,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
continue;
q->properties.is_active = false;
- decrement_queue_count(dqm, q->properties.type);
+ decrement_queue_count(dqm, qpd, q);
}
pdd->last_evict_timestamp = get_jiffies_64();
retval = execute_queues_cpsch(dqm,
@@ -888,11 +892,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = true;
- increment_queue_count(dqm, q->properties.type);
- if (q->properties.is_gws) {
- dqm->gws_queue_count++;
- qpd->mapped_gws_queue = true;
- }
+ increment_queue_count(dqm, qpd, q);
if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
continue;
@@ -950,7 +950,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
continue;
q->properties.is_active = true;
- increment_queue_count(dqm, q->properties.type);
+ increment_queue_count(dqm, &pdd->qpd, q);
}
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1378,7 +1378,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->total_queue_count);
list_add(&kq->list, &qpd->priv_queue_list);
- increment_queue_count(dqm, kq->queue->properties.type);
+ increment_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
dqm_unlock(dqm);
@@ -1392,7 +1392,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
{
dqm_lock(dqm);
list_del(&kq->list);
- decrement_queue_count(dqm, kq->queue->properties.type);
+ decrement_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = false;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
/*
@@ -1467,7 +1467,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
qpd->queue_count++;
if (q->properties.is_active) {
- increment_queue_count(dqm, q->properties.type);
+ increment_queue_count(dqm, qpd, q);
execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1683,15 +1683,11 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
list_del(&q->list);
qpd->queue_count--;
if (q->properties.is_active) {
- decrement_queue_count(dqm, q->properties.type);
+ decrement_queue_count(dqm, qpd, q);
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
- if (q->properties.is_gws) {
- dqm->gws_queue_count--;
- qpd->mapped_gws_queue = false;
- }
}
/*
@@ -1932,7 +1928,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* Clean all kernel queues */
list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
list_del(&kq->list);
- decrement_queue_count(dqm, kq->queue->properties.type);
+ decrement_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = false;
dqm->total_queue_count--;
filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1945,13 +1941,8 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- if (q->properties.is_active) {
- decrement_queue_count(dqm, q->properties.type);
- if (q->properties.is_gws) {
- dqm->gws_queue_count--;
- qpd->mapped_gws_queue = false;
- }
- }
+ if (q->properties.is_active)
+ decrement_queue_count(dqm, qpd, q);
dqm->total_queue_count--;
}
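
The refactor in this file threads qpd and the queue itself into increment_queue_count()/decrement_queue_count(), so the GWS bookkeeping (dqm->gws_queue_count, qpd->mapped_gws_queue) lives in exactly one place instead of being repeated at every call site — several of those duplicated blocks are deleted above. A stripped-down sketch of the same move, with toy stand-ins for dqm, qpd and queue:

    #include <stdbool.h>
    #include <stdio.h>

    struct dqm { int active, gws; };
    struct qpd { bool mapped_gws_queue; };
    struct queue { bool is_gws; };

    static void increment_queue_count(struct dqm *d, struct qpd *p, struct queue *q)
    {
        d->active++;
        if (q->is_gws) {            /* previously duplicated at the call sites */
            d->gws++;
            p->mapped_gws_queue = true;
        }
    }

    static void decrement_queue_count(struct dqm *d, struct qpd *p, struct queue *q)
    {
        d->active--;
        if (q->is_gws) {
            d->gws--;
            p->mapped_gws_queue = false;
        }
    }

    int main(void)
    {
        struct dqm d = {0}; struct qpd p = {0}; struct queue q = { .is_gws = true };
        increment_queue_count(&d, &p, &q);
        decrement_queue_count(&d, &p, &q);
        printf("active=%d gws=%d mapped=%d\n", d.active, d.gws, (int)p.mapped_gws_queue);
        return 0;
    }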
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 64f4a51cc880..6e5e8d637f48 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -128,8 +128,8 @@ static int allocate_event_notification_slot(struct kfd_process *p,
}
/*
- * Assumes that p->event_mutex is held and of course that p is not going
- * away (current or locked).
+ * Assumes that p->event_mutex or rcu_read_lock is held and of course that p is
+ * not going away.
*/
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
@@ -251,16 +251,18 @@ static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
struct kfd_event_waiter *waiter;
/* Wake up pending waiters. They will return failure */
+ spin_lock(&ev->lock);
list_for_each_entry(waiter, &ev->wq.head, wait.entry)
- waiter->event = NULL;
+ WRITE_ONCE(waiter->event, NULL);
wake_up_all(&ev->wq);
+ spin_unlock(&ev->lock);
if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
ev->type == KFD_EVENT_TYPE_DEBUG)
p->signal_event_count--;
idr_remove(&p->event_idr, ev->event_id);
- kfree(ev);
+ kfree_rcu(ev, rcu);
}
static void destroy_events(struct kfd_process *p)
@@ -392,6 +394,7 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
ev->auto_reset = auto_reset;
ev->signaled = false;
+ spin_lock_init(&ev->lock);
init_waitqueue_head(&ev->wq);
*event_page_offset = 0;
@@ -466,6 +469,7 @@ int kfd_criu_restore_event(struct file *devkfd,
ev->auto_reset = ev_priv->auto_reset;
ev->signaled = ev_priv->signaled;
+ spin_lock_init(&ev->lock);
init_waitqueue_head(&ev->wq);
mutex_lock(&p->event_mutex);
@@ -609,13 +613,13 @@ static void set_event(struct kfd_event *ev)
/* Auto reset if the list is non-empty and we're waking
* someone. waitqueue_active is safe here because we're
- * protected by the p->event_mutex, which is also held when
+ * protected by the ev->lock, which is also held when
* updating the wait queues in kfd_wait_on_events.
*/
ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);
list_for_each_entry(waiter, &ev->wq.head, wait.entry)
- waiter->activated = true;
+ WRITE_ONCE(waiter->activated, true);
wake_up_all(&ev->wq);
}
@@ -626,16 +630,23 @@ int kfd_set_event(struct kfd_process *p, uint32_t event_id)
int ret = 0;
struct kfd_event *ev;
- mutex_lock(&p->event_mutex);
+ rcu_read_lock();
ev = lookup_event_by_id(p, event_id);
+ if (!ev) {
+ ret = -EINVAL;
+ goto unlock_rcu;
+ }
+ spin_lock(&ev->lock);
- if (ev && event_can_be_cpu_signaled(ev))
+ if (event_can_be_cpu_signaled(ev))
set_event(ev);
else
ret = -EINVAL;
- mutex_unlock(&p->event_mutex);
+ spin_unlock(&ev->lock);
+unlock_rcu:
+ rcu_read_unlock();
return ret;
}
@@ -650,23 +661,30 @@ int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
int ret = 0;
struct kfd_event *ev;
- mutex_lock(&p->event_mutex);
+ rcu_read_lock();
ev = lookup_event_by_id(p, event_id);
+ if (!ev) {
+ ret = -EINVAL;
+ goto unlock_rcu;
+ }
+ spin_lock(&ev->lock);
- if (ev && event_can_be_cpu_signaled(ev))
+ if (event_can_be_cpu_signaled(ev))
reset_event(ev);
else
ret = -EINVAL;
- mutex_unlock(&p->event_mutex);
+ spin_unlock(&ev->lock);
+unlock_rcu:
+ rcu_read_unlock();
return ret;
}
static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
- page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
+ WRITE_ONCE(page_slots(p->signal_page)[ev->event_id], UNSIGNALED_EVENT_SLOT);
}
static void set_event_from_interrupt(struct kfd_process *p,
@@ -674,7 +692,9 @@ static void set_event_from_interrupt(struct kfd_process *p,
{
if (ev && event_can_be_gpu_signaled(ev)) {
acknowledge_signal(p, ev);
+ spin_lock(&ev->lock);
set_event(ev);
+ spin_unlock(&ev->lock);
}
}
@@ -693,7 +713,7 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
if (!p)
return; /* Presumably process exited. */
- mutex_lock(&p->event_mutex);
+ rcu_read_lock();
if (valid_id_bits)
ev = lookup_signaled_event_by_partial_id(p, partial_id,
@@ -721,7 +741,7 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
if (id >= KFD_SIGNAL_EVENT_LIMIT)
break;
- if (slots[id] != UNSIGNALED_EVENT_SLOT)
+ if (READ_ONCE(slots[id]) != UNSIGNALED_EVENT_SLOT)
set_event_from_interrupt(p, ev);
}
} else {
@@ -730,14 +750,14 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
* only signaled events from the IDR.
*/
for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
- if (slots[id] != UNSIGNALED_EVENT_SLOT) {
+ if (READ_ONCE(slots[id]) != UNSIGNALED_EVENT_SLOT) {
ev = lookup_event_by_id(p, id);
set_event_from_interrupt(p, ev);
}
}
}
- mutex_unlock(&p->event_mutex);
+ rcu_read_unlock();
kfd_unref_process(p);
}
@@ -760,7 +780,7 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
return event_waiters;
}
-static int init_event_waiter_get_status(struct kfd_process *p,
+static int init_event_waiter(struct kfd_process *p,
struct kfd_event_waiter *waiter,
uint32_t event_id)
{
@@ -769,22 +789,15 @@ static int init_event_waiter_get_status(struct kfd_process *p,
if (!ev)
return -EINVAL;
+ spin_lock(&ev->lock);
waiter->event = ev;
waiter->activated = ev->signaled;
ev->signaled = ev->signaled && !ev->auto_reset;
-
- return 0;
-}
-
-static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
-{
- struct kfd_event *ev = waiter->event;
-
- /* Only add to the wait list if we actually need to
- * wait on this event.
- */
if (!waiter->activated)
add_wait_queue(&ev->wq, &waiter->wait);
+ spin_unlock(&ev->lock);
+
+ return 0;
}
/* test_event_condition - Test condition of events being waited for
@@ -804,10 +817,10 @@ static uint32_t test_event_condition(bool all, uint32_t num_events,
uint32_t activated_count = 0;
for (i = 0; i < num_events; i++) {
- if (!event_waiters[i].event)
+ if (!READ_ONCE(event_waiters[i].event))
return KFD_IOC_WAIT_RESULT_FAIL;
- if (event_waiters[i].activated) {
+ if (READ_ONCE(event_waiters[i].activated)) {
if (!all)
return KFD_IOC_WAIT_RESULT_COMPLETE;
@@ -836,6 +849,8 @@ static int copy_signaled_event_data(uint32_t num_events,
for (i = 0; i < num_events; i++) {
waiter = &event_waiters[i];
event = waiter->event;
+ if (!event)
+ return -EINVAL; /* event was destroyed */
if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
dst = &data[i].memory_exception_data;
src = &event->memory_exception_data;
@@ -846,11 +861,8 @@ static int copy_signaled_event_data(uint32_t num_events,
}
return 0;
-
}
-
-
static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
@@ -874,9 +886,12 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
uint32_t i;
for (i = 0; i < num_events; i++)
- if (waiters[i].event)
+ if (waiters[i].event) {
+ spin_lock(&waiters[i].event->lock);
remove_wait_queue(&waiters[i].event->wq,
&waiters[i].wait);
+ spin_unlock(&waiters[i].event->lock);
+ }
kfree(waiters);
}
@@ -900,6 +915,9 @@ int kfd_wait_on_events(struct kfd_process *p,
goto out;
}
+ /* Use p->event_mutex here to protect against concurrent creation and
+ * destruction of events while we initialize event_waiters.
+ */
mutex_lock(&p->event_mutex);
for (i = 0; i < num_events; i++) {
@@ -911,8 +929,8 @@ int kfd_wait_on_events(struct kfd_process *p,
goto out_unlock;
}
- ret = init_event_waiter_get_status(p, &event_waiters[i],
- event_data.event_id);
+ ret = init_event_waiter(p, &event_waiters[i],
+ event_data.event_id);
if (ret)
goto out_unlock;
}
@@ -930,10 +948,6 @@ int kfd_wait_on_events(struct kfd_process *p,
goto out_unlock;
}
- /* Add to wait lists if we need to wait. */
- for (i = 0; i < num_events; i++)
- init_event_waiter_add_to_waitlist(&event_waiters[i]);
-
mutex_unlock(&p->event_mutex);
while (true) {
@@ -978,14 +992,19 @@ int kfd_wait_on_events(struct kfd_process *p,
}
__set_current_state(TASK_RUNNING);
+ mutex_lock(&p->event_mutex);
/* copy_signaled_event_data may sleep. So this has to happen
* after the task state is set back to RUNNING.
+ *
+ * The event may also have been destroyed after signaling. So
+ * copy_signaled_event_data also must confirm that the event
+ * still exists. Therefore this must be under the p->event_mutex
+ * which is also held when events are destroyed.
*/
if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
ret = copy_signaled_event_data(num_events,
event_waiters, events);
- mutex_lock(&p->event_mutex);
out_unlock:
free_waiters(num_events, event_waiters);
mutex_unlock(&p->event_mutex);
@@ -1044,8 +1063,7 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
}
/*
- * Assumes that p->event_mutex is held and of course
- * that p is not going away (current or locked).
+ * Assumes that p is not going away.
*/
static void lookup_events_by_type_and_signal(struct kfd_process *p,
int type, void *event_data)
@@ -1057,6 +1075,8 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
ev_data = (struct kfd_hsa_memory_exception_data *) event_data;
+ rcu_read_lock();
+
id = KFD_FIRST_NONSIGNAL_EVENT_ID;
idr_for_each_entry_continue(&p->event_idr, ev, id)
if (ev->type == type) {
@@ -1064,9 +1084,11 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
dev_dbg(kfd_device,
"Event found: id %X type %d",
ev->event_id, ev->type);
+ spin_lock(&ev->lock);
set_event(ev);
if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
ev->memory_exception_data = *ev_data;
+ spin_unlock(&ev->lock);
}
if (type == KFD_EVENT_TYPE_MEMORY) {
@@ -1089,6 +1111,8 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
p->lead_thread->pid, p->pasid);
}
}
+
+ rcu_read_unlock();
}
#ifdef KFD_SUPPORT_IOMMU_V2
@@ -1164,16 +1188,10 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid,
if (KFD_GC_VERSION(dev) != IP_VERSION(9, 1, 0) &&
KFD_GC_VERSION(dev) != IP_VERSION(9, 2, 2) &&
- KFD_GC_VERSION(dev) != IP_VERSION(9, 3, 0)) {
- mutex_lock(&p->event_mutex);
-
- /* Lookup events by type and signal them */
+ KFD_GC_VERSION(dev) != IP_VERSION(9, 3, 0))
lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
&memory_exception_data);
- mutex_unlock(&p->event_mutex);
- }
-
kfd_unref_process(p);
}
#endif /* KFD_SUPPORT_IOMMU_V2 */
@@ -1190,12 +1208,7 @@ void kfd_signal_hw_exception_event(u32 pasid)
if (!p)
return; /* Presumably process exited. */
- mutex_lock(&p->event_mutex);
-
- /* Lookup events by type and signal them */
lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);
-
- mutex_unlock(&p->event_mutex);
kfd_unref_process(p);
}
@@ -1231,16 +1244,19 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
info->prot_write ? 1 : 0;
memory_exception_data.failure.imprecise = 0;
}
- mutex_lock(&p->event_mutex);
+
+ rcu_read_lock();
id = KFD_FIRST_NONSIGNAL_EVENT_ID;
idr_for_each_entry_continue(&p->event_idr, ev, id)
if (ev->type == KFD_EVENT_TYPE_MEMORY) {
+ spin_lock(&ev->lock);
ev->memory_exception_data = memory_exception_data;
set_event(ev);
+ spin_unlock(&ev->lock);
}
- mutex_unlock(&p->event_mutex);
+ rcu_read_unlock();
kfd_unref_process(p);
}
@@ -1274,22 +1290,28 @@ void kfd_signal_reset_event(struct kfd_dev *dev)
continue;
}
- mutex_lock(&p->event_mutex);
+ rcu_read_lock();
+
id = KFD_FIRST_NONSIGNAL_EVENT_ID;
idr_for_each_entry_continue(&p->event_idr, ev, id) {
if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
+ spin_lock(&ev->lock);
ev->hw_exception_data = hw_exception_data;
ev->hw_exception_data.gpu_id = user_gpu_id;
set_event(ev);
+ spin_unlock(&ev->lock);
}
if (ev->type == KFD_EVENT_TYPE_MEMORY &&
reset_cause == KFD_HW_EXCEPTION_ECC) {
+ spin_lock(&ev->lock);
ev->memory_exception_data = memory_exception_data;
ev->memory_exception_data.gpu_id = user_gpu_id;
set_event(ev);
+ spin_unlock(&ev->lock);
}
}
- mutex_unlock(&p->event_mutex);
+
+ rcu_read_unlock();
}
srcu_read_unlock(&kfd_processes_srcu, idx);
}
@@ -1322,19 +1344,25 @@ void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid)
memory_exception_data.gpu_id = user_gpu_id;
memory_exception_data.failure.imprecise = true;
- mutex_lock(&p->event_mutex);
+ rcu_read_lock();
+
idr_for_each_entry_continue(&p->event_idr, ev, id) {
if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
+ spin_lock(&ev->lock);
ev->hw_exception_data = hw_exception_data;
set_event(ev);
+ spin_unlock(&ev->lock);
}
if (ev->type == KFD_EVENT_TYPE_MEMORY) {
+ spin_lock(&ev->lock);
ev->memory_exception_data = memory_exception_data;
set_event(ev);
+ spin_unlock(&ev->lock);
}
}
- mutex_unlock(&p->event_mutex);
+
+ rcu_read_unlock();
/* user application will handle SIGBUS signal */
send_sig(SIGBUS, p->lead_thread, 0);
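
Across kfd_events.c the hot signaling paths drop p->event_mutex in favor of rcu_read_lock() for the lookup plus a new per-event spinlock for state changes, with kfree_rcu() deferring the free past any readers. A compressed userspace sketch of that locking shape — a rwlock stands in for RCU, a mutex for the event spinlock, and the deferred-free half is omitted:

    #include <pthread.h>
    #include <stdio.h>

    struct event { pthread_mutex_t lock; int signaled; }; /* ~struct kfd_event */

    static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER; /* ~rcu_read_lock() */
    static struct event ev = { PTHREAD_MUTEX_INITIALIZER, 0 };

    static struct event *lookup_event(int id) { return id == 0 ? &ev : NULL; }

    static int set_event(int id)
    {
        int ret = 0;
        pthread_rwlock_rdlock(&table_lock);  /* readers no longer serialize on one mutex */
        struct event *e = lookup_event(id);
        if (!e) {
            ret = -1;
        } else {
            pthread_mutex_lock(&e->lock);    /* per-event, not per-process */
            e->signaled = 1;
            pthread_mutex_unlock(&e->lock);
        }
        pthread_rwlock_unlock(&table_lock);
        return ret;
    }

    int main(void)
    {
        printf("set_event(0)=%d signaled=%d\n", set_event(0), ev.signaled);
        return 0;
    }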
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
index 1238af11916e..1c62c8dd6460 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
@@ -59,6 +59,7 @@ struct kfd_event {
int type;
+ spinlock_t lock;
wait_queue_head_t wq; /* List of event waiters. */
/* Only for signal events. */
@@ -69,6 +70,8 @@ struct kfd_event {
struct kfd_hsa_memory_exception_data memory_exception_data;
struct kfd_hsa_hw_exception_data hw_exception_data;
};
+
+ struct rcu_head rcu; /* for asynchronous kfree_rcu */
};
#define KFD_EVENT_TIMEOUT_IMMEDIATE 0
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 7eedbcd14828..03c29bdd89a1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -91,28 +91,34 @@ enum SQ_INTERRUPT_ERROR_TYPE {
#define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20
static void event_interrupt_poison_consumption(struct kfd_dev *dev,
- uint16_t pasid, uint16_t source_id)
+ uint16_t pasid, uint16_t client_id)
{
- int ret = -EINVAL;
+ int old_poison, ret = -EINVAL;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
if (!p)
return;
/* all queues of a process will be unmapped in one time */
- if (atomic_read(&p->poison)) {
- kfd_unref_process(p);
- return;
- }
-
- atomic_set(&p->poison, 1);
+ old_poison = atomic_cmpxchg(&p->poison, 0, 1);
kfd_unref_process(p);
+ if (old_poison)
+ return;
- switch (source_id) {
- case SOC15_INTSRC_SQ_INTERRUPT_MSG:
+ switch (client_id) {
+ case SOC15_IH_CLIENTID_SE0SH:
+ case SOC15_IH_CLIENTID_SE1SH:
+ case SOC15_IH_CLIENTID_SE2SH:
+ case SOC15_IH_CLIENTID_SE3SH:
+ case SOC15_IH_CLIENTID_UTCL2:
ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
break;
- case SOC15_INTSRC_SDMA_ECC:
+ case SOC15_IH_CLIENTID_SDMA0:
+ case SOC15_IH_CLIENTID_SDMA1:
+ case SOC15_IH_CLIENTID_SDMA2:
+ case SOC15_IH_CLIENTID_SDMA3:
+ case SOC15_IH_CLIENTID_SDMA4:
+ break;
default:
break;
}
@@ -122,10 +128,17 @@ static void event_interrupt_poison_consumption(struct kfd_dev *dev,
/* resetting queue passes, do page retirement without gpu reset
* resetting queue fails, fallback to gpu reset solution
*/
- if (!ret)
+ if (!ret) {
+ dev_warn(dev->adev->dev,
+ "RAS poison consumption, unmap queue flow succeeded: client id %d\n",
+ client_id);
amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
- else
+ } else {
+ dev_warn(dev->adev->dev,
+ "RAS poison consumption, fall back to gpu reset flow: client id %d\n",
+ client_id);
amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
+ }
}
static bool event_interrupt_isr_v9(struct kfd_dev *dev,
@@ -270,7 +283,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
sq_intr_err);
if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
- event_interrupt_poison_consumption(dev, pasid, source_id);
+ event_interrupt_poison_consumption(dev, pasid, client_id);
return;
}
break;
@@ -291,7 +304,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
if (source_id == SOC15_INTSRC_SDMA_TRAP) {
kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
} else if (source_id == SOC15_INTSRC_SDMA_ECC) {
- event_interrupt_poison_consumption(dev, pasid, source_id);
+ event_interrupt_poison_consumption(dev, pasid, client_id);
return;
}
} else if (client_id == SOC15_IH_CLIENTID_VMC ||
@@ -300,6 +313,12 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
+ if (client_id == SOC15_IH_CLIENTID_UTCL2 &&
+ amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) {
+ event_interrupt_poison_consumption(dev, pasid, client_id);
+ return;
+ }
+
info.vmid = vmid;
info.mc_id = client_id;
info.page_addr = ih_ring_entry[4] |
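
The poison-consumption hunk above also replaces a racy atomic_read()+atomic_set() pair with a single atomic_cmpxchg(&p->poison, 0, 1), so exactly one interrupt wins even if several race in for the same process. The same once-only gate in portable C11:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int poison; /* ~p->poison */

    static int consume_poison_once(void)
    {
        int expected = 0;
        /* ~atomic_cmpxchg(&p->poison, 0, 1): true only for the single winner */
        return atomic_compare_exchange_strong(&poison, &expected, 1);
    }

    int main(void)
    {
        printf("first:  %d\n", consume_poison_once());  /* 1: unmaps the queues */
        printf("second: %d\n", consume_poison_once());  /* 0: already handled */
        return 0;
    }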
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index 9178cfe34f20..a9466d154395 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -146,15 +146,24 @@ static void interrupt_wq(struct work_struct *work)
struct kfd_dev *dev = container_of(work, struct kfd_dev,
interrupt_work);
uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE];
+ long start_jiffies = jiffies;
if (dev->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) {
dev_err_once(dev->adev->dev, "Ring entry too small\n");
return;
}
- while (dequeue_ih_ring_entry(dev, ih_ring_entry))
+ while (dequeue_ih_ring_entry(dev, ih_ring_entry)) {
dev->device_info.event_interrupt_class->interrupt_wq(dev,
ih_ring_entry);
+ if (jiffies - start_jiffies > HZ) {
+ /* If we spent more than a second processing signals,
+ * reschedule the worker to avoid soft-lockup warnings
+ */
+ queue_work(dev->ih_wq, &dev->interrupt_work);
+ break;
+ }
+ }
}
bool interrupt_is_wanted(struct kfd_dev *dev,
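
interrupt_wq() now gives itself a one-second budget (jiffies - start_jiffies > HZ) and requeues itself on dev->ih_wq if ring entries are still pending, rather than looping until the soft-lockup detector fires. The same time-boxed-worker pattern in a userspace sketch; next_entry() and requeue() are stand-ins for dequeue_ih_ring_entry() and queue_work():

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static bool next_entry(int *pending) { return (*pending)-- > 0; }
    static void requeue(void) { puts("requeue worker"); }

    static void interrupt_worker(int pending)
    {
        struct timespec start, now;
        clock_gettime(CLOCK_MONOTONIC, &start);

        while (next_entry(&pending)) {
            /* ... process one ring entry ... */
            clock_gettime(CLOCK_MONOTONIC, &now);
            if (now.tv_sec - start.tv_sec >= 1) { /* ~(jiffies - start_jiffies > HZ) */
                requeue(); /* yield the CPU; finish the backlog on the next run */
                break;
            }
        }
    }

    int main(void) { interrupt_worker(3); return 0; }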
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 9967a73d5b0f..bfb3b5c288cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -706,6 +706,7 @@ struct kfd_process_device {
/* VM context for GPUVM allocations */
struct file *drm_file;
void *drm_priv;
+ atomic64_t tlb_seq;
/* GPUVM allocations storage */
struct idr alloc_idr;
@@ -1016,6 +1017,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
uint32_t proximity_domain);
+struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
+ uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
@@ -1103,7 +1106,7 @@ struct kfd_criu_queue_priv_data {
uint32_t priority;
uint32_t q_percent;
uint32_t doorbell_id;
- uint32_t is_gws;
+ uint32_t gws;
uint32_t sdma_id;
uint32_t eop_ring_buffer_size;
uint32_t ctx_save_restore_area_size;
@@ -1326,6 +1329,14 @@ void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid);
void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
+static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
+{
+ return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) &&
+ dev->adev->sdma.instance[0].fw_version >= 18) ||
+ KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
+}
+
bool kfd_is_locked(void);
/* Compute profile */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 59c04b2d383b..9e82d7aa67fa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -722,7 +722,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
goto err_alloc_mem;
err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
- pdd->drm_priv, NULL);
+ pdd->drm_priv);
if (err)
goto err_map_mem;
@@ -1560,6 +1560,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
return ret;
}
pdd->drm_priv = drm_file->private_data;
+ atomic64_set(&pdd->tlb_seq, 0);
ret = kfd_process_device_reserve_ib_mem(pdd);
if (ret)
@@ -1949,8 +1950,18 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
+ struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
+ uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
struct kfd_dev *dev = pdd->dev;
+ /*
+ * We may race and lose here, but that is extremely unlikely, and the
+ * worst that can happen is that we flush the same changes into the
+ * TLB once more, which is harmless.
+ */
+ if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq)
+ return;
+
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
/* Nothing to flush until a VMID is assigned, which
* only happens when the first queue is created.
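
kfd_flush_tlb() now snapshots the VM's TLB sequence number and atomically exchanges it with the cached pdd->tlb_seq; when nothing changed since the last flush, the call returns early. A minimal sketch of the dedup, assuming only that the page-table code bumps the counter on every update (the two atomics below stand in for amdgpu_vm_tlb_seq() and pdd->tlb_seq):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t vm_tlb_seq;   /* bumped on every page-table update */
    static _Atomic uint64_t pdd_tlb_seq;  /* last sequence this device flushed for */

    static bool flush_tlb(void)
    {
        uint64_t seq = atomic_load(&vm_tlb_seq);
        if (atomic_exchange(&pdd_tlb_seq, seq) == seq)
            return false;              /* TLB already up to date: skip the flush */
        /* ... issue the real (expensive) flush here ... */
        return true;
    }

    int main(void)
    {
        atomic_fetch_add(&vm_tlb_seq, 1);          /* page tables changed */
        printf("flush #1: %d\n", flush_tlb());     /* 1: flush happens */
        printf("flush #2: %d\n", flush_tlb());     /* 0: deduplicated */
        return 0;
    }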
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 6eca9509f2e3..4f58e671d39b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -636,6 +636,8 @@ static int criu_checkpoint_queue(struct kfd_process_device *pdd,
q_data->ctx_save_restore_area_size =
q->properties.ctx_save_restore_area_size;
+ q_data->gws = !!q->gws;
+
ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
if (ret) {
pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
@@ -743,7 +745,6 @@ static void set_queue_properties_from_criu(struct queue_properties *qp,
struct kfd_criu_queue_priv_data *q_data)
{
qp->is_interop = false;
- qp->is_gws = q_data->is_gws;
qp->queue_percent = q_data->q_percent;
qp->priority = q_data->priority;
qp->queue_address = q_data->q_address;
@@ -826,12 +827,15 @@ int kfd_criu_restore_queue(struct kfd_process *p,
NULL);
if (ret) {
pr_err("Failed to create new queue err:%d\n", ret);
- ret = -EINVAL;
+ goto exit;
}
+ if (q_data->gws)
+ ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);
+
exit:
if (ret)
- pr_err("Failed to create queue (%d)\n", ret);
+ pr_err("Failed to restore queue (%d)\n", ret);
else
pr_debug("Queue id %d was restored successfully\n", queue_id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index b3fc3e958227..11b395b90a3d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1188,9 +1188,9 @@ svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
pr_debug("[0x%llx 0x%llx]\n", start, last);
- return amdgpu_vm_bo_update_mapping(adev, adev, vm, false, true, NULL,
- start, last, init_pte_value, 0,
- NULL, NULL, fence, NULL);
+ return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start,
+ last, init_pte_value, 0, 0, NULL, NULL,
+ fence);
}
static int
@@ -1243,7 +1243,6 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
{
struct amdgpu_device *adev = pdd->dev->adev;
struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
- bool table_freed = false;
uint64_t pte_flags;
unsigned long last_start;
int last_domain;
@@ -1278,13 +1277,12 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
(last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
pte_flags);
- r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
- NULL, last_start,
- prange->start + i, pte_flags,
- last_start - prange->start,
- NULL, dma_addr,
- &vm->last_update,
- &table_freed);
+ r = amdgpu_vm_update_range(adev, vm, false, false, false, NULL,
+ last_start, prange->start + i,
+ pte_flags,
+ last_start - prange->start,
+ bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
+ NULL, dma_addr, &vm->last_update);
for (j = last_start - prange->start; j <= i; j++)
dma_addr[j] |= last_domain;
@@ -1306,8 +1304,6 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
if (fence)
*fence = dma_fence_get(vm->last_update);
- if (table_freed)
- kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
out:
return r;
}
@@ -1363,6 +1359,8 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
break;
}
}
+
+ kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
}
return r;
@@ -1372,7 +1370,7 @@ struct svm_validate_context {
struct kfd_process *process;
struct svm_range *prange;
bool intr;
- unsigned long bitmap[MAX_GPU_INSTANCE];
+ DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
struct list_head validate_list;
struct ww_acquire_ctx ticket;
@@ -2687,11 +2685,6 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
pr_debug("kfd process not founded pasid 0x%x\n", pasid);
return 0;
}
- if (!p->xnack_enabled) {
- pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
- r = -EFAULT;
- goto out;
- }
svms = &p->svms;
pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
@@ -2702,6 +2695,12 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
goto out;
}
+ if (!p->xnack_enabled) {
+ pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
+ r = -EFAULT;
+ goto out;
+ }
+
/* p->lead_thread is available as kfd_process_wq_release flush the work
* before releasing task ref.
*/
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 3bdcae239bc0..8b7710b4d3ed 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -46,22 +46,32 @@ static struct list_head topology_device_list;
static struct kfd_system_properties sys_props;
static DECLARE_RWSEM(topology_lock);
-static atomic_t topology_crat_proximity_domain;
+static uint32_t topology_crat_proximity_domain;
-struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
+struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
uint32_t proximity_domain)
{
struct kfd_topology_device *top_dev;
struct kfd_topology_device *device = NULL;
- down_read(&topology_lock);
-
list_for_each_entry(top_dev, &topology_device_list, list)
if (top_dev->proximity_domain == proximity_domain) {
device = top_dev;
break;
}
+ return device;
+}
+
+struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
+ uint32_t proximity_domain)
+{
+ struct kfd_topology_device *device = NULL;
+
+ down_read(&topology_lock);
+
+ device = kfd_topology_device_by_proximity_domain_no_lock(
+ proximity_domain);
up_read(&topology_lock);
return device;
@@ -1060,7 +1070,7 @@ int kfd_topology_init(void)
down_write(&topology_lock);
kfd_topology_update_device_list(&temp_topology_device_list,
&topology_device_list);
- atomic_set(&topology_crat_proximity_domain, sys_props.num_devices-1);
+ topology_crat_proximity_domain = sys_props.num_devices-1;
ret = kfd_topology_update_sysfs();
up_write(&topology_lock);
@@ -1295,8 +1305,6 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
- proximity_domain = atomic_inc_return(&topology_crat_proximity_domain);
-
/* Include the CPU in xGMI hive if xGMI connected by assigning it the hive ID. */
if (gpu->hive_id && gpu->adev->gmc.xgmi.connected_to_cpu) {
struct kfd_topology_device *top_dev;
@@ -1321,12 +1329,16 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
*/
dev = kfd_assign_gpu(gpu);
if (!dev) {
+ down_write(&topology_lock);
+ proximity_domain = ++topology_crat_proximity_domain;
+
res = kfd_create_crat_image_virtual(&crat_image, &image_size,
COMPUTE_UNIT_GPU, gpu,
proximity_domain);
if (res) {
pr_err("Error creating VCRAT for GPU (ID: 0x%x)\n",
gpu_id);
+ topology_crat_proximity_domain--;
return res;
}
res = kfd_parse_crat_table(crat_image,
@@ -1335,10 +1347,10 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
if (res) {
pr_err("Error parsing VCRAT for GPU (ID: 0x%x)\n",
gpu_id);
+ topology_crat_proximity_domain--;
goto err;
}
- down_write(&topology_lock);
kfd_topology_update_device_list(&temp_topology_device_list,
&topology_device_list);
@@ -1485,25 +1497,78 @@ err:
return res;
}
+/**
+ * kfd_topology_update_io_links() - Update IO links after device removal.
+ * @proximity_domain: Proximity domain value of the dev being removed.
+ *
+ * The topology list currently is arranged in increasing order of
+ * proximity domain.
+ *
+ * Two things need to be done when a device is removed:
+ * 1. All the IO links to this device need to be removed.
+ * 2. All nodes after the removed device node move up one
+ *    position in the topology list, so their proximity
+ *    domain values decrease by 1. The proximity domain
+ *    values stored in IO links are updated to match the
+ *    new numbering.
+ *
+ * Context: The caller must hold write topology_lock.
+ */
+static void kfd_topology_update_io_links(int proximity_domain)
+{
+ struct kfd_topology_device *dev;
+ struct kfd_iolink_properties *iolink, *tmp;
+
+ list_for_each_entry(dev, &topology_device_list, list) {
+ if (dev->proximity_domain > proximity_domain)
+ dev->proximity_domain--;
+
+ list_for_each_entry_safe(iolink, tmp, &dev->io_link_props, list) {
+ /*
+ * If there is an io link to the dev being deleted
+ * then remove that IO link also.
+ */
+ if (iolink->node_to == proximity_domain) {
+ list_del(&iolink->list);
+ dev->io_link_count--;
+ dev->node_props.io_links_count--;
+ } else if (iolink->node_from > proximity_domain) {
+ iolink->node_from--;
+ } else if (iolink->node_to > proximity_domain) {
+ iolink->node_to--;
+ }
+ }
+
+ }
+}
+
int kfd_topology_remove_device(struct kfd_dev *gpu)
{
struct kfd_topology_device *dev, *tmp;
uint32_t gpu_id;
int res = -ENODEV;
+ int i = 0;
down_write(&topology_lock);
- list_for_each_entry_safe(dev, tmp, &topology_device_list, list)
+ list_for_each_entry_safe(dev, tmp, &topology_device_list, list) {
if (dev->gpu == gpu) {
gpu_id = dev->gpu_id;
kfd_remove_sysfs_node_entry(dev);
kfd_release_topology_device(dev);
sys_props.num_devices--;
+ kfd_topology_update_io_links(i);
+ topology_crat_proximity_domain = sys_props.num_devices-1;
+ sys_props.generation_count++;
res = 0;
if (kfd_topology_update_sysfs() < 0)
kfd_topology_release_sysfs();
break;
}
+ i++;
+ }
up_write(&topology_lock);
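
The topology change splits the lookup into a _no_lock body for callers that already hold topology_lock — kfd_parse_subtype_iolink() and kfd_create_vcrat_image_gpu() above now use it — while the public wrapper keeps taking the read lock itself. The shape of that split, sketched with a rwlock and a placeholder lookup:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t topology_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Caller must hold topology_lock (read or write side). */
    static int device_by_domain_no_lock(int domain)
    {
        return domain; /* placeholder for the topology_device_list walk */
    }

    /* Public variant: takes the read lock itself, as before. */
    static int device_by_domain(int domain)
    {
        pthread_rwlock_rdlock(&topology_lock);
        int dev = device_by_domain_no_lock(domain);
        pthread_rwlock_unlock(&topology_lock);
        return dev;
    }

    int main(void) { printf("%d\n", device_by_domain(2)); return 0; }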
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2ade82cfb1ac..a6880dd9c0bb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9254,7 +9254,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
&bundle->flip_addrs[planes_count].address,
afb->tmz_surface, false);
- DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
+ drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
new_plane_state->plane->index,
bundle->plane_infos[planes_count].dcc.enable);
@@ -9288,7 +9288,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_plane,
bundle->flip_addrs[planes_count].flip_timestamp_in_us);
- DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
+ drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
__func__,
bundle->flip_addrs[planes_count].address.grph.addr.high_part,
bundle->flip_addrs[planes_count].address.grph.addr.low_part);
@@ -9630,7 +9630,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
- DRM_DEBUG_ATOMIC(
+ drm_dbg_state(state->dev,
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
"planes_changed:%d, mode_changed:%d,active_changed:%d,"
"connectors_changed:%d\n",
@@ -10334,7 +10334,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto skip_modeset;
- DRM_DEBUG_ATOMIC(
+ drm_dbg_state(state->dev,
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
"planes_changed:%d, mode_changed:%d,active_changed:%d,"
"connectors_changed:%d\n",
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index da17ece1a2c5..188039f14544 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3491,6 +3491,40 @@ DEFINE_SHOW_ATTRIBUTE(mst_topo);
DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get,
visual_confirm_set, "%llu\n");
+
+/*
+ * Sets the DC skip_detection_link_training debug option from the given value.
+ * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_skip_detection_link_training
+ */
+static int skip_detection_link_training_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = data;
+
+ if (val == 0)
+ adev->dm.dc->debug.skip_detection_link_training = false;
+ else
+ adev->dm.dc->debug.skip_detection_link_training = true;
+
+ return 0;
+}
+
+/*
+ * Reads the DC skip_detection_link_training debug option value into the given buffer.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_skip_detection_link_training
+ */
+static int skip_detection_link_training_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = data;
+
+ *val = adev->dm.dc->debug.skip_detection_link_training;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(skip_detection_link_training_fops,
+ skip_detection_link_training_get,
+ skip_detection_link_training_set, "%llu\n");
+
/*
* Dumps the DCC_EN bit for each pipe.
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dcc_en
@@ -3584,6 +3618,9 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
&visual_confirm_fops);
+ debugfs_create_file_unsafe("amdgpu_dm_skip_detection_link_training", 0644, root, adev,
+ &skip_detection_link_training_fops);
+
debugfs_create_file_unsafe("amdgpu_dm_dmub_tracebuffer", 0644, root,
adev, &dmub_tracebuffer_fops);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index f5f39984702f..28cf24f6ab32 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -90,7 +90,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
{
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_connector *connector = &aconnector->base;
- struct edid *edid_buf = (struct edid *) edid->raw_edid;
+ struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL;
struct cea_sad *sads;
int sad_count = -1;
int sadb_count = -1;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 0c923a90615c..13b1751e69bf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -27,6 +27,7 @@
#include "dc.h"
#include "dm_helpers.h"
#include "amdgpu_dm.h"
+#include "modules/power/power_helpers.h"
#ifdef CONFIG_DRM_AMD_DC_DCN
static bool link_supports_psrsu(struct dc_link *link)
@@ -39,6 +40,9 @@ static bool link_supports_psrsu(struct dc_link *link)
if (dc->ctx->dce_version < DCN_VERSION_3_1)
return false;
+ if (!is_psr_su_specific_panel(link))
+ return false;
+
if (!link->dpcd_caps.alpm_caps.bits.AUX_WAKE_ALPM_CAP ||
!link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED)
return false;
@@ -79,7 +83,10 @@ void amdgpu_dm_set_psr_caps(struct dc_link *link)
link->psr_settings.psr_feature_enabled = true;
}
- DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
+ DRM_INFO("PSR support %d, DC PSR ver %d, sink PSR ver %d\n",
+ link->psr_settings.psr_feature_enabled,
+ link->psr_settings.psr_version,
+ link->dpcd_caps.psr_info.psr_version);
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c
index a8cb039d2572..34e3a64f556e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c
@@ -213,6 +213,9 @@ static enum connector_id connector_id_from_bios_object_id(
case CONNECTOR_OBJECT_ID_MXM:
id = CONNECTOR_ID_MXM;
break;
+ case CONNECTOR_OBJECT_ID_USBC:
+ id = CONNECTOR_ID_USBC;
+ break;
default:
id = CONNECTOR_ID_UNKNOWN;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 0e36cd800fc9..32efa92422e8 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -522,7 +522,8 @@ static enum bp_result transmitter_control_v2(
*/
params.acConfig.ucEncoderSel = 1;
- if (CONNECTOR_ID_DISPLAY_PORT == connector_id)
+ if (CONNECTOR_ID_DISPLAY_PORT == connector_id
+ || CONNECTOR_ID_USBC == connector_id)
/* Bit4: DP connector flag
* =0 connector is none-DP connector
* =1 connector is DP connector
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index dfba6138f538..26feefbb8990 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -374,7 +374,7 @@ void dce_clock_read_ss_info(struct clk_mgr_internal *clk_mgr_dce)
clk_mgr_dce->dprefclk_ss_percentage =
info.spread_spectrum_percentage;
}
- if (clk_mgr_dce->base.ctx->dc->debug.ignore_dpref_ss)
+ if (clk_mgr_dce->base.ctx->dc->config.ignore_dpref_ss)
clk_mgr_dce->dprefclk_ss_percentage = 0;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index fbdd0a92d146..451e8d6cd8bd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -157,8 +157,7 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
} else {
if (update_dppclk || update_dispclk)
dcn20_update_clocks_update_dentist(clk_mgr, context);
- if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index f4dee0e48a67..02943ca65807 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -88,11 +88,22 @@ static int rn_get_active_display_cnt_wa(struct dc *dc, struct dc_state *context)
static void rn_set_low_power_state(struct clk_mgr *clk_mgr_base)
{
+ int display_count;
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ struct dc_state *context = dc->current_state;
+
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+
+ display_count = rn_get_active_display_cnt_wa(dc, context);
- rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
- /* update power state */
- clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+ /* if we can go lower, go lower */
+ if (display_count == 0) {
+ rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+ }
+ }
}
static void rn_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 8161a6ae410d..30c6f9cd717f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -94,6 +94,9 @@ static int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
{
uint32_t result;
+ result = rn_smu_wait_for_response(clk_mgr, 10, 200000);
+ ASSERT(result == VBIOSSMC_Result_OK);
+
/* First clear response register */
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Status_BUSY);
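
The one hunk in this file makes rn_vbios_smu_send_msg_with_param() wait for any in-flight message to complete before clearing the response register and firing the next one, so a slow previous command can no longer have its response clobbered. The ordering in a toy sketch; wait_for_response() and fire_message() stand in for the register polling and writes:

    #include <stdio.h>

    static int busy; /* ~MP1_SMN_C2PMSG_91 still holding VBIOSSMC_Status_BUSY */

    static int wait_for_response(void)
    {
        if (busy)
            puts("drained an in-flight message first");
        busy = 0;
        return 1; /* toy: the poll always succeeds */
    }

    static void fire_message(int msg) { (void)msg; busy = 1; }

    static int send_msg(int msg)
    {
        wait_for_response(); /* the new first step: let the previous command finish */
        /* ... clear the response register, write the parameter and message id ... */
        fire_message(msg);
        return wait_for_response();
    }

    int main(void) { printf("result=%d\n", send_msg(42)); return 0; }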
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index bc4ddc36fe58..f310b0d25a07 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -176,8 +176,7 @@ static void vg_update_clocks(struct clk_mgr *clk_mgr_base,
if (update_dppclk || update_dispclk)
dcn301_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
// always update dtos unless clock is lowered and not safe to lower
- if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
- dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 59fdd7f0d609..969b40250434 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -615,13 +615,37 @@ static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
}
}
+void dcn31_set_low_power_state(struct clk_mgr *clk_mgr_base)
+{
+ int display_count;
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ struct dc_state *context = dc->current_state;
+
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+ display_count = dcn31_get_active_display_cnt_wa(dc, context);
+ /* if we can go lower, go lower */
+ if (display_count == 0) {
+ union display_idle_optimization_u idle_info = { 0 };
+
+ idle_info.idle_info.df_request_disabled = 1;
+ idle_info.idle_info.phy_ref_clk_off = 1;
+ idle_info.idle_info.s0i2_rdy = 1;
+ dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+ }
+ }
+}
+
static struct clk_mgr_funcs dcn31_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn31_update_clocks,
.init_clocks = dcn31_init_clocks,
.enable_pme_wa = dcn31_enable_pme_wa,
.are_clock_states_equal = dcn31_are_clock_states_equal,
- .notify_wm_ranges = dcn31_notify_wm_ranges
+ .notify_wm_ranges = dcn31_notify_wm_ranges,
+ .set_low_power_state = dcn31_set_low_power_state
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 702d00ce7da4..3121dd2d2a91 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -686,8 +686,8 @@ void dcn316_clk_mgr_construct(
clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base);
clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
dce_clock_read_ss_info(&clk_mgr->base);
- clk_mgr->base.dccg->ref_dtbclk_khz =
- dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
+ /*clk_mgr->base.dccg->ref_dtbclk_khz =
+ dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/
clk_mgr->base.base.bw_params = &dcn316_bw_params;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index c436db416708..c2fcd67bcc4d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1569,11 +1569,24 @@ bool dc_validate_boot_timing(const struct dc *dc,
if (dc_is_dp_signal(link->connector_signal)) {
unsigned int pix_clk_100hz;
+ uint32_t numOdmPipes = 1;
+ uint32_t id_src[4] = {0};
dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
dc->res_pool->dp_clock_source,
tg_inst, &pix_clk_100hz);
+ if (tg->funcs->get_optc_source)
+ tg->funcs->get_optc_source(tg,
+ &numOdmPipes, &id_src[0], &id_src[1]);
+
+ if (numOdmPipes == 2)
+ pix_clk_100hz *= 2;
+ if (numOdmPipes == 4)
+ pix_clk_100hz *= 4;
+
+ // Note: In rare cases, HW pixclk may differ from crtc's pixclk
+ // slightly due to rounding issues in 10 kHz units.
if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
return false;
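Under ODM combine each pipe drives only a slice of the stream, so the pixel clock read back from one pipe must be scaled by the ODM factor before it can be compared against the CRTC timing. A self-contained sketch of the comparison the hunk above performs; names are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    /* Scale the per-pipe clock by the ODM segment count, then compare. */
    static bool boot_timing_pix_clk_matches(uint32_t crtc_pix_clk_100hz,
                                            uint32_t hw_pix_clk_100hz,
                                            uint32_t num_odm_pipes)
    {
        if (num_odm_pipes == 2)
            hw_pix_clk_100hz *= 2;
        else if (num_odm_pipes == 4)
            hw_pix_clk_100hz *= 4;

        /* the driver rejects seamless boot on any mismatch, even though
         * HW may round slightly in 10 kHz units */
        return crtc_pix_clk_100hz == hw_pix_clk_100hz;
    }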
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index bbaa5abdf888..faab1460d0b5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -345,6 +345,7 @@ static enum signal_type get_basic_signal_type(struct graphics_object_id encoder,
case CONNECTOR_ID_LVDS:
return SIGNAL_TYPE_LVDS;
case CONNECTOR_ID_DISPLAY_PORT:
+ case CONNECTOR_ID_USBC:
return SIGNAL_TYPE_DISPLAY_PORT;
case CONNECTOR_ID_EDP:
return SIGNAL_TYPE_EDP;
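Several spots in this file gain a CONNECTOR_ID_USBC case so USB-C connectors are treated exactly like native DisplayPort ones. The mapping, condensed into a self-contained sketch:

    enum connector_id { CONN_LVDS, CONN_DP, CONN_EDP, CONN_USBC };
    enum signal_type  { SIG_LVDS, SIG_DP, SIG_EDP, SIG_NONE };

    static enum signal_type basic_signal_for(enum connector_id id)
    {
        switch (id) {
        case CONN_LVDS: return SIG_LVDS;
        case CONN_DP:   /* fallthrough: USB-C tunnels DisplayPort */
        case CONN_USBC: return SIG_DP;
        case CONN_EDP:  return SIG_EDP;
        default:        return SIG_NONE;
        }
    }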
@@ -380,7 +381,8 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
bool present =
((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
- (connector_id == CONNECTOR_ID_EDP));
+ (connector_id == CONNECTOR_ID_EDP) ||
+ (connector_id == CONNECTOR_ID_USBC));
ddc = dal_ddc_service_get_ddc_pin(link->ddc);
@@ -476,7 +478,8 @@ static enum signal_type link_detect_sink(struct dc_link *link,
result = SIGNAL_TYPE_DVI_SINGLE_LINK;
}
break;
- case CONNECTOR_ID_DISPLAY_PORT: {
+ case CONNECTOR_ID_DISPLAY_PORT:
+ case CONNECTOR_ID_USBC: {
/* DP HPD short pulse. Passive DP dongles do not
* generate short pulses.
*/
@@ -1591,6 +1594,7 @@ static bool dc_link_construct_legacy(struct dc_link *link,
link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
break;
case CONNECTOR_ID_DISPLAY_PORT:
+ case CONNECTOR_ID_USBC:
link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
if (link->hpd_gpio)
@@ -3075,6 +3079,11 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
+ if (allow_active && link->type == dc_connection_none) {
+ // Don't enter PSR if panel is not connected
+ return false;
+ }
+
/* Set power optimization flag */
if (power_opts && link->psr_settings.psr_power_opt != *power_opts) {
link->psr_settings.psr_power_opt = *power_opts;
@@ -3083,6 +3092,10 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active
psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst);
}
+ if (psr != NULL && link->psr_settings.psr_feature_enabled &&
+ force_static && psr->funcs->psr_force_static)
+ psr->funcs->psr_force_static(psr, panel_inst);
+
/* Enable or Disable PSR */
if (allow_active && link->psr_settings.psr_allow_active != *allow_active) {
link->psr_settings.psr_allow_active = *allow_active;
@@ -3093,8 +3106,6 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active
#endif
if (psr != NULL && link->psr_settings.psr_feature_enabled) {
- if (force_static && psr->funcs->psr_force_static)
- psr->funcs->psr_force_static(psr, panel_inst);
psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst);
} else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) &&
link->psr_settings.psr_feature_enabled)
@@ -3476,8 +3487,6 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
- struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
struct link_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp;
const struct dc_link_settings empty_link_settings = {0};
@@ -3511,7 +3520,7 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
pipe_ctx->pipe_idx);
}
- proposed_table.stream_allocations[0].hpo_dp_stream_enc = hpo_dp_stream_encoder;
+ proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
ASSERT(proposed_table.stream_count == 1);
@@ -3524,8 +3533,7 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
proposed_table.stream_allocations[0].slot_count);
/* program DP source TX for payload */
- hpo_dp_link_encoder->funcs->update_stream_allocation_table(
- hpo_dp_link_encoder,
+ link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
&proposed_table);
/* poll for ACT handled */
@@ -3562,8 +3570,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct link_encoder *link_encoder = NULL;
- struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
struct dp_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
@@ -3573,9 +3579,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
DC_LOGGER_INIT(link->ctx->logger);
- link_encoder = link_enc_cfg_get_link_enc(link);
- ASSERT(link_encoder);
-
/* enable_link_dp_mst already checks link->enabled_stream_count
* and that the stream is in link->stream[]. This is called during
* set mode, when stream_enc is available.
@@ -3620,37 +3623,17 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
ASSERT(proposed_table.stream_count > 0);
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- static enum dc_status status;
- uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF;
-
- for (i = 0; i < link->mst_stream_alloc_table.stream_count; i++)
- mst_alloc_slots += link->mst_stream_alloc_table.stream_allocations[i].slot_count;
-
- status = dc_process_dmub_set_mst_slots(link->dc, link->link_index,
- mst_alloc_slots, &prev_mst_slots_in_use);
- ASSERT(status == DC_OK);
- DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n",
- status, mst_alloc_slots, prev_mst_slots_in_use);
- }
-
/* program DP source TX for payload */
- switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
- case DP_8b_10b_ENCODING:
- link_encoder->funcs->update_mst_stream_allocation_table(
- link_encoder,
- &link->mst_stream_alloc_table);
- break;
- case DP_128b_132b_ENCODING:
- hpo_dp_link_encoder->funcs->update_stream_allocation_table(
- hpo_dp_link_encoder,
- &link->mst_stream_alloc_table);
- break;
- case DP_UNKNOWN_ENCODING:
+ if (link_hwss->ext.update_stream_allocation_table == NULL ||
+ dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
DC_LOG_ERROR("Failure: unknown encoding format\n");
return DC_ERROR_UNEXPECTED;
}
+ link_hwss->ext.update_stream_allocation_table(link,
+ &pipe_ctx->link_res,
+ &link->mst_stream_alloc_table);
+
/* send down message */
ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
stream->ctx,
@@ -3692,7 +3675,6 @@ enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
- struct link_encoder *link_encoder = link->link_enc;
struct dp_mst_stream_allocation_table proposed_table = {0};
uint8_t i;
enum act_return_status ret;
@@ -3756,8 +3738,13 @@ enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw
ASSERT(proposed_table.stream_count > 0);
/* update mst stream allocation table hardware state */
- link_encoder->funcs->update_mst_stream_allocation_table(
- link_encoder,
+ if (link_hwss->ext.update_stream_allocation_table == NULL ||
+ dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
&link->mst_stream_alloc_table);
/* poll for immediate branch device ACT handled */
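The payload-programming paths in this file (SST update and MST allocate/reduce above, MST deallocate and the FPGA path below) now dispatch through link_hwss->ext.update_stream_allocation_table instead of switching on 8b/10b vs 128b/132b encoding at every call site. The shape of that refactor, sketched with opaque stand-in types:

    #include <stddef.h>

    struct link;
    struct link_res;
    struct alloc_table;

    /* per-encoding hooks, resolved once when the link_hwss is selected */
    struct link_hwss_ext {
        void (*update_stream_allocation_table)(struct link *link,
                                               const struct link_res *res,
                                               const struct alloc_table *table);
    };

    /* Callers no longer switch on the encoding; they check the hook and call. */
    static int program_payload(const struct link_hwss_ext *ext, struct link *link,
                               const struct link_res *res,
                               const struct alloc_table *table)
    {
        if (ext->update_stream_allocation_table == NULL)
            return -1;      /* unknown encoding format */

        ext->update_stream_allocation_table(link, res, table);
        return 0;
    }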
@@ -3852,8 +3839,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct link_encoder *link_encoder = NULL;
- struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;
struct dp_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
int i;
@@ -3862,9 +3847,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
const struct dc_link_settings empty_link_settings = {0};
DC_LOGGER_INIT(link->ctx->logger);
- link_encoder = link_enc_cfg_get_link_enc(link);
- ASSERT(link_encoder);
-
/* deallocate_mst_payload is called before the link is disabled. On a
* mode change or monitor disable/enable, a new stream is created that
* is not in link->stream[] yet; its payload is not allocated, so de-alloc
@@ -3922,36 +3904,16 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
link->mst_stream_alloc_table.stream_allocations[i].slot_count);
}
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- enum dc_status status;
- uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF;
-
- for (i = 0; i < link->mst_stream_alloc_table.stream_count; i++)
- mst_alloc_slots += link->mst_stream_alloc_table.stream_allocations[i].slot_count;
-
- status = dc_process_dmub_set_mst_slots(link->dc, link->link_index,
- mst_alloc_slots, &prev_mst_slots_in_use);
- ASSERT(status != DC_NOT_SUPPORTED);
- DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n",
- status, mst_alloc_slots, prev_mst_slots_in_use);
- }
-
- switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
- case DP_8b_10b_ENCODING:
- link_encoder->funcs->update_mst_stream_allocation_table(
- link_encoder,
- &link->mst_stream_alloc_table);
- break;
- case DP_128b_132b_ENCODING:
- hpo_dp_link_encoder->funcs->update_stream_allocation_table(
- hpo_dp_link_encoder,
- &link->mst_stream_alloc_table);
- break;
- case DP_UNKNOWN_ENCODING:
+ /* update mst stream allocation table hardware state */
+ if (link_hwss->ext.update_stream_allocation_table == NULL ||
+ dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
DC_LOG_DEBUG("Unknown encoding format\n");
return DC_ERROR_UNEXPECTED;
}
+ link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
+ &link->mst_stream_alloc_table);
+
if (mst_mode) {
dm_helpers_dp_mst_poll_for_allocation_change_trigger(
stream->ctx,
@@ -4098,8 +4060,8 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi
proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
}
- pipe_ctx->link_res.hpo_dp_link_enc->funcs->update_stream_allocation_table(
- pipe_ctx->link_res.hpo_dp_link_enc,
+ link_hwss->ext.update_stream_allocation_table(stream->link,
+ &pipe_ctx->link_res,
&proposed_table);
if (link_hwss->ext.set_throttled_vcp_size)
@@ -4119,6 +4081,7 @@ void core_link_enable_stream(
struct link_encoder *link_enc;
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
if (is_dp_128b_132b_signal(pipe_ctx))
vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
@@ -4147,56 +4110,19 @@ void core_link_enable_stream(
link_enc->funcs->setup(
link_enc,
pipe_ctx->stream->signal);
- pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync(
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.tg->inst,
- stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE);
- }
-
- if (is_dp_128b_132b_signal(pipe_ctx)) {
- pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->set_stream_attribute(
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- &stream->timing,
- stream->output_color_space,
- stream->use_vsc_sdp_for_colorimetry,
- stream->timing.flags.DSC,
- false);
- otg_out_dest = OUT_MUX_HPO_DP;
- } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
- pipe_ctx->stream_res.stream_enc,
- &stream->timing,
- stream->output_color_space,
- stream->use_vsc_sdp_for_colorimetry,
- stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
}
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
-
- if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
- pipe_ctx->stream_res.stream_enc,
- &stream->timing,
- stream->phy_pix_clk,
- pipe_ctx->stream_res.audio != NULL);
-
pipe_ctx->stream->link->link_state_valid = true;
- if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
+ if (pipe_ctx->stream_res.tg->funcs->set_out_mux) {
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ otg_out_dest = OUT_MUX_HPO_DP;
+ else
+ otg_out_dest = OUT_MUX_DIO;
pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
+ }
- if (dc_is_dvi_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute(
- pipe_ctx->stream_res.stream_enc,
- &stream->timing,
- (pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ?
- true : false);
-
- if (dc_is_lvds_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->lvds_set_stream_attribute(
- pipe_ctx->stream_res.stream_enc,
- &stream->timing);
+ link_hwss->setup_stream_attribute(pipe_ctx);
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
bool apply_edp_fast_boot_optimization =
@@ -4331,13 +4257,11 @@ void core_link_enable_stream(
dc->hwss.enable_audio_stream(pipe_ctx);
} else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (is_dp_128b_132b_signal(pipe_ctx))
fpga_dp_hpo_enable_link_and_stream(state, pipe_ctx);
- }
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
dc_is_virtual_signal(pipe_ctx->stream->signal))
dp_set_dsc_enable(pipe_ctx, true);
-
}
if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
@@ -4683,22 +4607,22 @@ bool dc_link_is_fec_supported(const struct dc_link *link)
bool dc_link_should_enable_fec(const struct dc_link *link)
{
- bool is_fec_disable = false;
- bool ret = false;
+ bool force_disable = false;
- if ((link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
+ if (link->fec_state == dc_link_fec_enabled)
+ force_disable = false;
+ else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
link->local_sink &&
- link->local_sink->edid_caps.panel_patch.disable_fec) ||
- (link->connector_signal == SIGNAL_TYPE_EDP
- // enable FEC for EDP if DSC is supported
- && link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT == false
- ))
- is_fec_disable = true;
-
- if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && !is_fec_disable)
- ret = true;
-
- return ret;
+ link->local_sink->edid_caps.panel_patch.disable_fec)
+ force_disable = true;
+ else if (link->connector_signal == SIGNAL_TYPE_EDP
+ && (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.
+ dsc_support.DSC_SUPPORT == false
+ || link->dc->debug.disable_dsc_edp
+ || !link->dc->caps.edp_dsc_support))
+ force_disable = true;
+
+ return !force_disable && dc_link_is_fec_supported(link);
}
uint32_t dc_bandwidth_in_kbps_from_timing(
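The rewritten dc_link_should_enable_fec() folds the panel-patch and eDP/DSC exceptions into a single force_disable flag, and keeps FEC on once a link already runs with it. The same decision as a self-contained sketch; field names are simplified, and the non-MST/local-sink qualifiers are folded into the quirk flag:

    #include <stdbool.h>

    struct fec_inputs {
        bool already_enabled;          /* keep FEC on once the link has it */
        bool panel_patch_disable_fec;  /* per-sink EDID quirk */
        bool is_edp;
        bool edp_dsc_usable;           /* DSC supported by sink, allowed by driver */
        bool fec_supported;            /* dc_link_is_fec_supported() equivalent */
    };

    static bool should_enable_fec(const struct fec_inputs *in)
    {
        bool force_disable = false;

        if (in->already_enabled)
            force_disable = false;
        else if (in->panel_patch_disable_fec)
            force_disable = true;
        else if (in->is_edp && !in->edp_dsc_usable)
            force_disable = true;       /* FEC on eDP only pays off with DSC */

        return !force_disable && in->fec_supported;
    }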
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 22dabe596dfc..c5f5d25035d2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -4085,9 +4085,32 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
return false;
}
+static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate)
+{
+ switch (test_rate) {
+ case DP_TEST_LINK_RATE_RBR:
+ return LINK_RATE_LOW;
+ case DP_TEST_LINK_RATE_HBR:
+ return LINK_RATE_HIGH;
+ case DP_TEST_LINK_RATE_HBR2:
+ return LINK_RATE_HIGH2;
+ case DP_TEST_LINK_RATE_HBR3:
+ return LINK_RATE_HIGH3;
+ case DP_TEST_LINK_RATE_UHBR10:
+ return LINK_RATE_UHBR10;
+ case DP_TEST_LINK_RATE_UHBR20:
+ return LINK_RATE_UHBR20;
+ case DP_TEST_LINK_RATE_UHBR13_5:
+ return LINK_RATE_UHBR13_5;
+ default:
+ return LINK_RATE_UNKNOWN;
+ }
+}
+
static void dp_test_send_link_training(struct dc_link *link)
{
struct dc_link_settings link_settings = {0};
+ uint8_t test_rate = 0;
core_link_read_dpcd(
link,
@@ -4097,8 +4120,9 @@ static void dp_test_send_link_training(struct dc_link *link)
core_link_read_dpcd(
link,
DP_TEST_LINK_RATE,
- (unsigned char *)(&link_settings.link_rate),
+ &test_rate,
1);
+ link_settings.link_rate = get_link_rate_from_test_link_rate(test_rate);
/* Set preferred link settings */
link->verified_link_cap.lane_count = link_settings.lane_count;
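The fix above stops reading the one-byte DP_TEST_LINK_RATE DPCD register through a cast to the wider enum field; that aliasing only ever worked because the 8b/10b test-rate encodings happen to equal the driver's link-rate enum values, and it breaks for the UHBR rates, whose DPCD codes (0x01/0x02/0x03) do not. A sketch of the safe pattern, reading into a byte and translating explicitly; the enum values here are illustrative:

    #include <stdint.h>

    /* DPCD TEST_LINK_RATE encodings, illustrative subset */
    #define TEST_RATE_RBR    0x06
    #define TEST_RATE_HBR    0x0A
    #define TEST_RATE_UHBR10 0x01

    enum link_rate { LR_UNKNOWN, LR_RBR, LR_HBR, LR_UHBR10 };

    /* Read the register into a byte, then translate explicitly; never
     * alias a wider enum field through a byte pointer. */
    static enum link_rate rate_from_test_rate(uint8_t test_rate)
    {
        switch (test_rate) {
        case TEST_RATE_RBR:    return LR_RBR;
        case TEST_RATE_HBR:    return LR_HBR;
        case TEST_RATE_UHBR10: return LR_UHBR10;
        default:               return LR_UNKNOWN;
        }
    }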
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index d251c3f3a714..f292303b75a5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1076,6 +1076,15 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
bool res = false;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+ /* Invalid input */
+ if (!plane_state->dst_rect.width ||
+ !plane_state->dst_rect.height ||
+ !plane_state->src_rect.width ||
+ !plane_state->src_rect.height) {
+ ASSERT(0);
+ return false;
+ }
+
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
@@ -2111,6 +2120,8 @@ static int acquire_resource_from_hw_enabled_state(
{
struct dc_link *link = stream->link;
unsigned int i, inst, tg_inst = 0;
+ uint32_t numPipes = 1;
+ uint32_t id_src[4] = {0};
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
@@ -2139,38 +2150,62 @@ static int acquire_resource_from_hw_enabled_state(
if (!res_ctx->pipe_ctx[tg_inst].stream) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[tg_inst];
- pipe_ctx->stream_res.tg = pool->timing_generators[tg_inst];
- pipe_ctx->plane_res.mi = pool->mis[tg_inst];
- pipe_ctx->plane_res.hubp = pool->hubps[tg_inst];
- pipe_ctx->plane_res.ipp = pool->ipps[tg_inst];
- pipe_ctx->plane_res.xfm = pool->transforms[tg_inst];
- pipe_ctx->plane_res.dpp = pool->dpps[tg_inst];
- pipe_ctx->stream_res.opp = pool->opps[tg_inst];
-
- if (pool->dpps[tg_inst]) {
- pipe_ctx->plane_res.mpcc_inst = pool->dpps[tg_inst]->inst;
-
- // Read DPP->MPCC->OPP Pipe from HW State
- if (pool->mpc->funcs->read_mpcc_state) {
- struct mpcc_state s = {0};
-
- pool->mpc->funcs->read_mpcc_state(pool->mpc, pipe_ctx->plane_res.mpcc_inst, &s);
-
- if (s.dpp_id < MAX_MPCC)
- pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].dpp_id = s.dpp_id;
-
- if (s.bot_mpcc_id < MAX_MPCC)
- pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].mpcc_bot =
- &pool->mpc->mpcc_array[s.bot_mpcc_id];
+ id_src[0] = tg_inst;
+
+ if (pipe_ctx->stream_res.tg->funcs->get_optc_source)
+ pipe_ctx->stream_res.tg->funcs->get_optc_source(pipe_ctx->stream_res.tg,
+ &numPipes, &id_src[0], &id_src[1]);
+
+ for (i = 0; i < numPipes; i++) {
+ // Bail out if the source id is invalid
+ if (id_src[i] == 0xf)
+ return -1;
+
+ pipe_ctx->stream_res.tg = pool->timing_generators[tg_inst];
+ pipe_ctx->plane_res.mi = pool->mis[id_src[i]];
+ pipe_ctx->plane_res.hubp = pool->hubps[id_src[i]];
+ pipe_ctx->plane_res.ipp = pool->ipps[id_src[i]];
+ pipe_ctx->plane_res.xfm = pool->transforms[id_src[i]];
+ pipe_ctx->plane_res.dpp = pool->dpps[id_src[i]];
+ pipe_ctx->stream_res.opp = pool->opps[id_src[i]];
+
+ if (pool->dpps[id_src[i]]) {
+ pipe_ctx->plane_res.mpcc_inst = pool->dpps[id_src[i]]->inst;
+
+ if (pool->mpc->funcs->read_mpcc_state) {
+ struct mpcc_state s = {0};
+ pool->mpc->funcs->read_mpcc_state(pool->mpc, pipe_ctx->plane_res.mpcc_inst, &s);
+ if (s.dpp_id < MAX_MPCC)
+ pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].dpp_id =
+ s.dpp_id;
+ if (s.bot_mpcc_id < MAX_MPCC)
+ pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].mpcc_bot =
+ &pool->mpc->mpcc_array[s.bot_mpcc_id];
+ if (s.opp_id < MAX_OPP)
+ pipe_ctx->stream_res.opp->mpc_tree_params.opp_id = s.opp_id;
+ }
+ }
+ pipe_ctx->pipe_idx = id_src[i];
- if (s.opp_id < MAX_OPP)
- pipe_ctx->stream_res.opp->mpc_tree_params.opp_id = s.opp_id;
+ if (id_src[i] >= pool->timing_generator_count) {
+ id_src[i] = pool->timing_generator_count - 1;
+ pipe_ctx->stream_res.tg = pool->timing_generators[id_src[i]];
+ pipe_ctx->stream_res.opp = pool->opps[id_src[i]];
}
+
+ pipe_ctx->stream = stream;
}
- pipe_ctx->pipe_idx = tg_inst;
- pipe_ctx->stream = stream;
- return tg_inst;
+ if (numPipes == 2) {
+ stream->apply_boot_odm_mode = dm_odm_combine_policy_2to1;
+ res_ctx->pipe_ctx[id_src[0]].next_odm_pipe = &res_ctx->pipe_ctx[id_src[1]];
+ res_ctx->pipe_ctx[id_src[0]].prev_odm_pipe = NULL;
+ res_ctx->pipe_ctx[id_src[1]].next_odm_pipe = NULL;
+ res_ctx->pipe_ctx[id_src[1]].prev_odm_pipe = &res_ctx->pipe_ctx[id_src[0]];
+ } else
+ stream->apply_boot_odm_mode = dm_odm_combine_mode_disabled;
+
+ return id_src[0];
}
return -1;
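When the hardware comes up with a 2:1 ODM combine already programmed, the code above stitches the two pipe contexts into a doubly linked ODM pair before handing them to the new state. The linkage, reduced to the two pointers involved:

    #include <stddef.h>

    struct pipe_ctx {
        struct pipe_ctx *next_odm_pipe;
        struct pipe_ctx *prev_odm_pipe;
    };

    /* Link master (m) and slave (s) pipe contexts into a 2:1 ODM pair. */
    static void link_odm_pair(struct pipe_ctx *m, struct pipe_ctx *s)
    {
        m->prev_odm_pipe = NULL;
        m->next_odm_pipe = s;
        s->prev_odm_pipe = m;
        s->next_odm_pipe = NULL;
    }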
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 77ef9d1f9ea8..2f0c436dae4c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.177"
+#define DC_VER "3.2.181"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -340,6 +340,7 @@ struct dc_config {
bool is_asymmetric_memory;
bool is_single_rank_dimm;
bool use_pipe_ctx_sync_logic;
+ bool ignore_dpref_ss;
};
enum visual_confirm {
@@ -416,6 +417,7 @@ struct dc_clocks {
#if defined(CONFIG_DRM_AMD_DC_DCN)
enum dcn_zstate_support_state zstate_support;
bool dtbclk_en;
+ int dtbclk_khz;
#endif
enum dcn_pwr_state pwr_state;
/*
@@ -665,6 +667,7 @@ struct dc_debug_options {
uint32_t edid_read_retry_times;
bool remove_disconnect_edp;
unsigned int force_odm_combine; //bit vector based on otg inst
+ unsigned int seamless_boot_odm_combine;
#if defined(CONFIG_DRM_AMD_DC_DCN)
unsigned int force_odm_combine_4to1; //bit vector based on otg inst
bool disable_z9_mpc;
@@ -729,7 +732,6 @@ struct dc_debug_options {
bool apply_vendor_specific_lttpr_wa;
bool extended_blank_optimization;
union aux_wake_wa_options aux_wake_wa;
- bool ignore_dpref_ss;
uint8_t psr_power_use_phy_fsm;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 36ac2a8746bd..2c54b6e0498b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -133,6 +133,16 @@ enum dp_link_encoding {
DP_128b_132b_ENCODING = 2,
};
+enum dp_test_link_rate {
+ DP_TEST_LINK_RATE_RBR = 0x06,
+ DP_TEST_LINK_RATE_HBR = 0x0A,
+ DP_TEST_LINK_RATE_HBR2 = 0x14,
+ DP_TEST_LINK_RATE_HBR3 = 0x1E,
+ DP_TEST_LINK_RATE_UHBR10 = 0x01,
+ DP_TEST_LINK_RATE_UHBR20 = 0x02,
+ DP_TEST_LINK_RATE_UHBR13_5 = 0x03,
+};
+
struct dc_link_settings {
enum dc_lane_count lane_count;
enum dc_link_rate link_rate;
@@ -620,7 +630,7 @@ union test_request {
uint8_t LINK_TEST_PATTRN :1;
uint8_t EDID_READ :1;
uint8_t PHY_TEST_PATTERN :1;
- uint8_t RESERVED :1;
+ uint8_t PHY_TEST_CHANNEL_CODING_TYPE :2;
uint8_t AUDIO_TEST_PATTERN :1;
uint8_t TEST_AUDIO_DISABLED_VIDEO :1;
} bits;
@@ -993,8 +1003,8 @@ union dp_128b_132b_supported_link_rates {
union dp_128b_132b_supported_lttpr_link_rates {
struct {
uint8_t UHBR10 :1;
- uint8_t UHBR13_5:1;
uint8_t UHBR20 :1;
+ uint8_t UHBR13_5:1;
uint8_t RESERVED:5;
} bits;
uint8_t raw;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index aa818bf840eb..a3c37ee3f849 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -67,13 +67,9 @@ struct link_mst_stream_allocation_table {
struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
};
-struct time_stamp {
- uint64_t edp_poweroff;
- uint64_t edp_poweron;
-};
-
-struct link_trace {
- struct time_stamp time_stamp;
+struct edp_trace_power_timestamps {
+ uint64_t poweroff;
+ uint64_t poweron;
};
struct dp_trace_lt_counts {
@@ -96,6 +92,7 @@ struct dp_trace {
struct dp_trace_lt commit_lt_trace;
unsigned int link_loss_count;
bool is_initialized;
+ struct edp_trace_power_timestamps edp_trace_power_timestamps;
};
/* PSR feature flags */
@@ -231,7 +228,6 @@ struct dc_link {
struct dc_link_status link_status;
struct dprx_states dprx_states;
- struct link_trace link_trace;
struct gpio *hpd_gpio;
enum dc_link_fec_state fec_state;
};
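The eDP power-sequencing timestamps move out of dc_link's ad-hoc link_trace into dp_trace, and the dce110 hunks further down switch to accessor calls instead of poking the fields directly. A sketch of what such accessors plausibly look like; the real ones live in link/link_dp_trace.c, so these signatures are assumptions:

    #include <stdbool.h>
    #include <stdint.h>

    struct edp_trace_power_timestamps { uint64_t poweroff, poweron; };
    struct dp_trace { struct edp_trace_power_timestamps edp; };

    /* stand-in for dm_get_timestamp() */
    static uint64_t now_ts(void) { return 0; }

    static void trace_set_edp_power_timestamp(struct dp_trace *t, bool power_up)
    {
        if (power_up)
            t->edp.poweron = now_ts();
        else
            t->edp.poweroff = now_ts();
    }

    static uint64_t trace_get_edp_poweroff_timestamp(const struct dp_trace *t)
    {
        return t->edp.poweroff;
    }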
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index c4168c11257c..580420c3eedc 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -246,6 +246,7 @@ struct dc_stream_state {
bool apply_edp_fast_boot_optimization;
bool apply_seamless_boot_optimization;
+ uint32_t apply_boot_odm_mode;
uint32_t stream_id;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 8e814000db62..29e20d92b0bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -565,13 +565,11 @@ int dce_aux_transfer_raw(struct ddc_service *ddc,
struct ddc *ddc_pin = ddc->ddc_pin;
struct dce_aux *aux_engine;
struct aux_request_transaction_data aux_req;
- struct aux_reply_transaction_data aux_rep;
uint8_t returned_bytes = 0;
int res = -1;
uint32_t status;
memset(&aux_req, 0, sizeof(aux_req));
- memset(&aux_rep, 0, sizeof(aux_rep));
aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
if (!acquire(aux_engine, ddc_pin)) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index cc5128e67daf..760653e2b607 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -985,7 +985,7 @@ static bool dcn31_program_pix_clk(
struct bp_pixel_clock_parameters bp_pc_params = {0};
enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
// For these signal types the driver programs the DP_DTO directly, without calling the VBIOS command table
- if (dc_is_dp_signal(pix_clk_params->signal_type)) {
+ if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
if (e) {
/* Set DTO values: phase = target clock, modulo = reference clock*/
REG_WRITE(PHASE[inst], e->target_pixel_rate_khz * e->mult_factor);
@@ -1254,7 +1254,7 @@ static uint32_t dcn3_get_pix_clk_dividers(
struct pixel_clk_params *pix_clk_params,
struct pll_settings *pll_settings)
{
- unsigned long long actual_pix_clk_100Hz = pix_clk_params->requested_pix_clk_100hz;
+ unsigned long long actual_pix_clk_100Hz = pix_clk_params ? pix_clk_params->requested_pix_clk_100hz : 0;
struct dce110_clk_src *clk_src;
clk_src = TO_DCE110_CLK_SRC(cs);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 248602c15f3a..9fc1ba12ec19 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -67,6 +67,7 @@
#include "dcn10/dcn10_hw_sequencer.h"
+#include "link/link_dp_trace.h"
#include "dce110_hw_sequencer.h"
#define GAMMA_HW_POINTS_NUM 256
@@ -819,19 +820,19 @@ void dce110_edp_power_control(
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- link->link_trace.time_stamp.edp_poweroff), 1000000);
+ dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
unsigned long long time_since_edp_poweron_ms =
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- link->link_trace.time_stamp.edp_poweron), 1000000);
+ dp_trace_get_edp_poweron_timestamp(link)), 1000000);
DC_LOG_HW_RESUME_S3(
"%s: transition: power_up=%d current_ts=%llu edp_poweroff=%llu edp_poweron=%llu time_since_edp_poweroff_ms=%llu time_since_edp_poweron_ms=%llu",
__func__,
power_up,
current_ts,
- link->link_trace.time_stamp.edp_poweroff,
- link->link_trace.time_stamp.edp_poweron,
+ dp_trace_get_edp_poweroff_timestamp(link),
+ dp_trace_get_edp_poweron_timestamp(link),
time_since_edp_poweroff_ms,
time_since_edp_poweron_ms);
@@ -846,7 +847,7 @@ void dce110_edp_power_control(
link->local_sink->edid_caps.panel_patch.extra_t12_ms;
/* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time. */
- if (link->link_trace.time_stamp.edp_poweroff != 0) {
+ if (dp_trace_get_edp_poweroff_timestamp(link) != 0) {
if (time_since_edp_poweroff_ms < remaining_min_edp_poweroff_time_ms)
remaining_min_edp_poweroff_time_ms =
remaining_min_edp_poweroff_time_ms - time_since_edp_poweroff_ms;
@@ -904,17 +905,13 @@ void dce110_edp_power_control(
__func__, (power_up ? "On":"Off"),
bp_result);
- if (!power_up)
- /*save driver power off time stamp*/
- link->link_trace.time_stamp.edp_poweroff = dm_get_timestamp(ctx);
- else
- link->link_trace.time_stamp.edp_poweron = dm_get_timestamp(ctx);
+ dp_trace_set_edp_power_timestamp(link, power_up);
DC_LOG_HW_RESUME_S3(
"%s: updated values: edp_poweroff=%llu edp_poweron=%llu\n",
__func__,
- link->link_trace.time_stamp.edp_poweroff,
- link->link_trace.time_stamp.edp_poweron);
+ dp_trace_get_edp_poweroff_timestamp(link),
+ dp_trace_get_edp_poweron_timestamp(link));
if (bp_result != BP_RESULT_OK)
DC_LOG_ERROR(
@@ -942,14 +939,14 @@ void dce110_edp_wait_for_T12(
return;
if (!link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl) &&
- link->link_trace.time_stamp.edp_poweroff != 0) {
+ dp_trace_get_edp_poweroff_timestamp(link) != 0) {
unsigned int t12_duration = 500; // Default T12 as per spec
unsigned long long current_ts = dm_get_timestamp(ctx);
unsigned long long time_since_edp_poweroff_ms =
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- link->link_trace.time_stamp.edp_poweroff), 1000000);
+ dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
t12_duration += link->local_sink->edid_caps.panel_patch.extra_t12_ms; // Add extra T12
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index fbff6beb78be..3a7f76e2c598 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1316,7 +1316,7 @@ void hubp1_set_flip_int(struct hubp *hubp)
*
* @hubp: hubp struct reference.
*/
-void hubp1_wait_pipe_read_start(struct hubp *hubp)
+static void hubp1_wait_pipe_read_start(struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 781334b395ba..e02ac75afbf7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1259,6 +1259,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
int i;
struct dce_hwseq *hws = dc->hwseq;
+ struct hubbub *hubbub = dc->res_pool->hubbub;
bool can_apply_seamless_boot = false;
for (i = 0; i < context->stream_count; i++) {
@@ -1294,6 +1295,21 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
}
}
+ /* Reset det size */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = dc->res_pool->hubps[i];
+
+ /* No need to reset DET for seamless boot */
+ if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
+ continue;
+
+ if (hubbub && hubp) {
+ if (hubbub->funcs->program_det_size)
+ hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
+ }
+ }
+
/* num_opp will be equal to number of mpcc */
for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -1359,6 +1375,11 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
+ if (tg->funcs->is_tg_enabled(tg)) {
+ if (tg->funcs->init_odm)
+ tg->funcs->init_odm(tg);
+ }
+
tg->funcs->tg_init(tg);
}
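The new init loop above zeroes each HUBP's DET allocation unless the pipe carries a seamless-boot stream, in which case the firmware-programmed value must survive. Sketched with a stand-in program_det_size hook:

    #include <stdbool.h>
    #include <stddef.h>

    struct hubbub_funcs {
        void (*program_det_size)(int hubp_inst, unsigned int det_kbytes);
    };

    /* Zero every pipe's DET at init, except pipes still scanning out a
     * seamless-boot stream. */
    static void reset_det_sizes(const struct hubbub_funcs *funcs,
                                const bool pipe_has_boot_stream[],
                                int pipe_count, bool can_apply_seamless_boot)
    {
        for (int i = 0; i < pipe_count; i++) {
            if (pipe_has_boot_stream[i] && can_apply_seamless_boot)
                continue;
            if (funcs->program_det_size != NULL)
                funcs->program_det_size(i, 0);
        }
    }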
@@ -1493,8 +1514,12 @@ void dcn10_init_hw(struct dc *dc)
/* Check for enabled DIG to identify enabled display */
if (link->link_enc->funcs->is_dig_enabled &&
- link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
link->link_status.link_active = true;
+ if (link->link_enc->funcs->fec_is_active &&
+ link->link_enc->funcs->fec_is_active(link->link_enc))
+ link->fec_state = dc_link_fec_enabled;
+ }
}
/* we want to turn off all dp displays before doing detection */
@@ -2522,14 +2547,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
- if (per_pixel_alpha)
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
- else
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
+ if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else if (per_pixel_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else
@@ -2979,8 +3008,11 @@ void dcn10_prepare_bandwidth(
true);
dcn10_stereo_hw_frame_pack_wa(dc, context);
- if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
+ DC_FP_START();
dcn_bw_notify_pplib_of_wm_ranges(dc);
+ DC_FP_END();
+ }
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3013,8 +3045,11 @@ void dcn10_optimize_bandwidth(
dcn10_stereo_hw_frame_pack_wa(dc, context);
- if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
+ DC_FP_START();
dcn_bw_notify_pplib_of_wm_ranges(dc);
+ DC_FP_END();
+ }
if (dc->debug.sanity_checks)
hws->funcs.verify_allow_pstate_change_high(dc);
@@ -3039,12 +3074,16 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
* as well.
*/
for (i = 0; i < num_pipes; i++) {
- pipe_ctx[i]->stream_res.tg->funcs->set_drr(
- pipe_ctx[i]->stream_res.tg, &params);
- if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
- pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx[i]->stream_res.tg,
- event_triggers, num_frames);
+ if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
+ if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
+ pipe_ctx[i]->stream_res.tg->funcs->set_drr(
+ pipe_ctx[i]->stream_res.tg, &params);
+ if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
+ if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
+ pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
+ pipe_ctx[i]->stream_res.tg,
+ event_triggers, num_frames);
+ }
}
}
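The set_drr change wraps each optional timing-generator hook in a NULL check before calling it, tolerating both a missing tg and a missing function pointer. The guard pattern, minimal:

    #include <stddef.h>

    struct tg;
    struct tg_funcs {
        void (*set_drr)(struct tg *tg, const void *params);
    };
    struct tg { const struct tg_funcs *funcs; };

    /* Tolerate both a missing timing generator and a missing hook. */
    static void set_drr_safe(struct tg *tg, const void *params)
    {
        if (tg != NULL && tg->funcs != NULL && tg->funcs->set_drr != NULL)
            tg->funcs->set_drr(tg, params);
    }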
@@ -3175,7 +3214,8 @@ void dcn10_wait_for_mpcc_disconnect(
if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
- res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+ if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+ res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
hubp->funcs->set_blank(hubp, true);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 4048908dd265..bca049b2f867 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1141,6 +1141,20 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool)
*pool = NULL;
}
+static bool dcn10_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate)
+{
+ bool voltage_supported;
+
+ DC_FP_START();
+ voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate);
+ DC_FP_END();
+
+ return voltage_supported;
+}
+
static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
{
if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
@@ -1492,6 +1506,7 @@ static bool dcn10_resource_construct(
&& pool->base.pp_smu->rv_funcs.set_pme_wa_enable != NULL)
dc->debug.az_endpoint_mute_only = false;
+ DC_FP_START();
if (!dc->debug.disable_pplib_clock_request)
dcn_bw_update_from_pplib(dc);
dcn_bw_sync_calcs_and_dml(dc);
@@ -1499,6 +1514,7 @@ static bool dcn10_resource_construct(
dc->res_pool = &pool->base;
dcn_bw_notify_pplib_of_wm_ranges(dc);
}
+ DC_FP_END();
{
struct irq_service_init_data init_data;
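Both hunks in this file bracket the floating-point DML math with DC_FP_START()/DC_FP_END(), since kernel code may only touch FPU state inside such a protected region. The wrapper idiom, sketched with stand-in macros and a stubbed FP core:

    /* stand-ins for the kernel's DC_FP_START()/DC_FP_END() fences */
    #define FP_START() do { /* save FPU state, disable preemption */ } while (0)
    #define FP_END()   do { /* restore FPU state */ } while (0)

    /* stubbed FP core standing in for dcn_validate_bandwidth() */
    static int validate_bandwidth_fp(void *dc, void *context, int fast_validate)
    {
        (void)dc; (void)context; (void)fast_validate;
        return 1;
    }

    /* Integer-only entry point: all FPU use happens inside the fence. */
    static int validate_bandwidth(void *dc, void *context, int fast_validate)
    {
        int voltage_supported;

        FP_START();
        voltage_supported = validate_bandwidth_fp(dc, context, fast_validate);
        FP_END();

        return voltage_supported;
    }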
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 4290eaf11a04..b627c41713cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -2344,14 +2344,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
- if (per_pixel_alpha)
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
- else
- blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
blnd_cfg.overlap_only = false;
blnd_cfg.global_gain = 0xff;
+ if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else if (per_pixel_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
if (pipe_ctx->plane_state->global_alpha)
blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
else
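The DCN10 and DCN20 update_mpcc hunks select among three MPCC alpha modes: per-pixel alpha modulated by a global gain, plain per-pixel alpha, or global alpha only. The selection, condensed into a self-contained sketch:

    #include <stdbool.h>
    #include <stdint.h>

    enum alpha_mode { PER_PIXEL_WITH_GLOBAL_GAIN, PER_PIXEL, GLOBAL };

    struct blend_cfg { enum alpha_mode mode; uint8_t global_gain; };

    static struct blend_cfg pick_blend(bool per_pixel_alpha,
                                       bool has_global_alpha,
                                       uint8_t global_alpha_value)
    {
        struct blend_cfg cfg = { GLOBAL, 0xff };

        if (per_pixel_alpha && has_global_alpha) {
            /* plane-wide alpha scales the per-pixel alpha channel */
            cfg.mode = PER_PIXEL_WITH_GLOBAL_GAIN;
            cfg.global_gain = global_alpha_value;
        } else if (per_pixel_alpha) {
            cfg.mode = PER_PIXEL;
        }
        return cfg;
    }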
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 3fe4bfbb98a0..faab59508d82 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -997,6 +997,7 @@ static struct clock_source *dcn21_clock_source_create(
return &clk_src->base;
}
+ kfree(clk_src);
BREAK_TO_DEBUGGER();
return NULL;
}
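The added kfree() plugs a leak: when construction fails, the partially built clock source must be freed before returning NULL. The create/construct pattern it belongs to, sketched with libc allocation and a stubbed constructor:

    #include <stdlib.h>

    struct clk_src { int inst; };

    /* stand-in constructor; returns nonzero on success */
    static int clk_src_construct(struct clk_src *cs) { (void)cs; return 0; }

    static struct clk_src *clock_source_create(void)
    {
        struct clk_src *cs = calloc(1, sizeof(*cs));

        if (cs == NULL)
            return NULL;

        if (clk_src_construct(cs))
            return cs;

        free(cs);   /* construct failed: do not leak the allocation */
        return NULL;
    }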
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index f61ec8763844..782b8db451b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -535,8 +535,12 @@ void dcn30_init_hw(struct dc *dc)
/* Check for enabled DIG to identify enabled display */
if (link->link_enc->funcs->is_dig_enabled &&
- link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
link->link_status.link_active = true;
+ if (link->link_enc->funcs->fec_is_active &&
+ link->link_enc->funcs->fec_is_active(link->link_enc))
+ link->fec_state = dc_link_fec_enabled;
+ }
}
/* Power gate DSCs */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index e6a62cc75139..336b2ce6a636 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -2602,9 +2602,9 @@ static bool dcn30_resource_construct(
dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
- dc->caps.max_slave_planes = 1;
- dc->caps.max_slave_yuv_planes = 1;
- dc->caps.max_slave_rgb_planes = 1;
+ dc->caps.max_slave_planes = 2;
+ dc->caps.max_slave_yuv_planes = 2;
+ dc->caps.max_slave_rgb_planes = 2;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 88318e8ffca8..f0938653bb88 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -1219,9 +1219,9 @@ static bool dcn302_resource_construct(
/* total size = mall per channel * num channels * 1024 * 1024 */
dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576;
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
- dc->caps.max_slave_planes = 1;
- dc->caps.max_slave_yuv_planes = 1;
- dc->caps.max_slave_rgb_planes = 1;
+ dc->caps.max_slave_planes = 2;
+ dc->caps.max_slave_yuv_planes = 2;
+ dc->caps.max_slave_rgb_planes = 2;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
index d20e3b8ccc30..ec041e3cda30 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
@@ -15,32 +15,6 @@ DCN31 = dcn31_resource.o dcn31_hubbub.o dcn31_hwseq.o dcn31_init.o dcn31_hubp.o
dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \
dcn31_afmt.o dcn31_vpg.o
-ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o := -msse
-endif
-
-ifdef CONFIG_PPC64
-CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o += -mhard-float
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o += -mpreferred-stack-boundary=4
-else
-CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o += -msse2
-endif
-endif
-
AMD_DAL_DCN31 = $(addprefix $(AMDDALPATH)/dc/dcn31/,$(DCN31))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN31)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
index 8ae6117953ca..197a5cae068b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
@@ -54,7 +54,8 @@ void hubp31_soft_reset(struct hubp *hubp, bool reset)
REG_UPDATE(DCHUBP_CNTL, HUBP_SOFT_RESET, reset);
}
-void hubp31_program_extended_blank(struct hubp *hubp, unsigned int min_dst_y_next_start_optimized)
+static void hubp31_program_extended_blank(struct hubp *hubp,
+ unsigned int min_dst_y_next_start_optimized)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 631d8ac63aa4..531dd2c65007 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -188,8 +188,12 @@ void dcn31_init_hw(struct dc *dc)
/* Check for enabled DIG to identify enabled display */
if (link->link_enc->funcs->is_dig_enabled &&
- link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
link->link_status.link_active = true;
+ if (link->link_enc->funcs->fec_is_active &&
+ link->link_enc->funcs->fec_is_active(link->link_enc))
+ link->fec_state = dc_link_fec_enabled;
+ }
}
/* Enables outbox notifications for usb4 dpia */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index d7559e5a99ce..e708f07fe75a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -153,9 +153,4 @@ void dcn31_hw_sequencer_construct(struct dc *dc)
dc->hwss.init_hw = dcn20_fpga_init_hw;
dc->hwseq->funcs.init_pipes = NULL;
}
- if (dc->debug.disable_z10) {
- /*hw not support z10 or sw disable it*/
- dc->hwss.z10_restore = NULL;
- dc->hwss.z10_save_init = NULL;
- }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index e05527a3a8ba..c51f7dca94f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -91,8 +91,7 @@ static void optc31_set_odm_combine(struct timing_generator *optc, int *opp_id, i
optc1->opp_count = opp_cnt;
}
-/**
- * Enable CRTC
+/*
* Enable CRTC - call ASIC Control Object to enable Timing generator.
*/
static bool optc31_enable_crtc(struct timing_generator *optc)
@@ -214,6 +213,26 @@ void optc31_set_drr(
}
}
+void optc3_init_odm(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET_5(OPTC_DATA_SOURCE_SELECT, 0,
+ OPTC_NUM_OF_INPUT_SEGMENT, 0,
+ OPTC_SEG0_SRC_SEL, optc->inst,
+ OPTC_SEG1_SRC_SEL, 0xf,
+ OPTC_SEG2_SRC_SEL, 0xf,
+ OPTC_SEG3_SRC_SEL, 0xf
+ );
+
+ REG_SET(OTG_H_TIMING_CNTL, 0,
+ OTG_H_TIMING_DIV_MODE, 0);
+
+ REG_SET(OPTC_MEMORY_CONFIG, 0,
+ OPTC_MEM_SEL, 0);
+ optc1->opp_count = 1;
+}
+
static struct timing_generator_funcs dcn31_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -273,6 +292,7 @@ static struct timing_generator_funcs dcn31_tg_funcs = {
.program_manual_trigger = optc2_program_manual_trigger,
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
+ .init_odm = optc3_init_odm,
};
void dcn31_timing_generator_init(struct optc *optc1)
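optc3_init_odm() tears ODM back down to a single segment: SEG0 selects the OTG's own instance and the remaining segment selects are parked at 0xf, the same invalid-source sentinel the seamless-boot resource code checks for. A bit-packing sketch of the idea; the 4-bit field layout here is illustrative, not the real register map:

    #include <stdint.h>

    #define SEG_SRC_INVALID 0xfu   /* matches the 0xf sentinel checked at boot */

    /* Pack four 4-bit segment source selects into one register value. */
    static uint32_t optc_data_source_select(uint32_t seg0, uint32_t seg1,
                                            uint32_t seg2, uint32_t seg3)
    {
        return (seg0 & 0xf) | ((seg1 & 0xf) << 4) |
               ((seg2 & 0xf) << 8) | ((seg3 & 0xf) << 12);
    }

    /* Single-pipe (no ODM) configuration: only SEG0 carries a valid source,
     * e.g. optc_data_source_select(otg_inst, SEG_SRC_INVALID,
     *                              SEG_SRC_INVALID, SEG_SRC_INVALID); */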
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
index a37b16040c1d..9e881f2ce74b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
@@ -258,4 +258,6 @@ void dcn31_timing_generator_init(struct optc *optc1);
void optc31_set_drr(struct timing_generator *optc, const struct drr_params *params);
+void optc3_init_odm(struct timing_generator *optc);
+
#endif /* __DC_OPTC_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 63934ecf6be8..5b3f0c2dfb55 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -65,6 +65,7 @@
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
+#include "dml/dcn31/dcn31_fpu.h"
#include "dcn31/dcn31_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "dcn31_panel_cntl.h"
@@ -102,152 +103,6 @@
#define DC_LOGGER_INIT(logger)
-#define DCN3_1_DEFAULT_DET_SIZE 384
-
-struct _vcs_dpi_ip_params_st dcn3_1_ip = {
- .gpuvm_enable = 1,
- .gpuvm_max_page_table_levels = 1,
- .hostvm_enable = 1,
- .hostvm_max_page_table_levels = 2,
- .rob_buffer_size_kbytes = 64,
- .det_buffer_size_kbytes = DCN3_1_DEFAULT_DET_SIZE,
- .config_return_buffer_size_in_kbytes = 1792,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .meta_fifo_size_in_kentries = 32,
- .zero_size_buffer_entries = 512,
- .compbuf_reserved_space_64b = 256,
- .compbuf_reserved_space_zs = 64,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .meta_chunk_size_kbytes = 2,
- .min_meta_chunk_size_bytes = 256,
- .writeback_chunk_size_kbytes = 8,
- .ptoi_supported = false,
- .num_dsc = 3,
- .maximum_dsc_bits_per_component = 10,
- .dsc422_native_support = false,
- .is_line_buffer_bpp_fixed = true,
- .line_buffer_fixed_bpp = 48,
- .line_buffer_size_bits = 789504,
- .max_line_buffer_lines = 12,
- .writeback_interface_buffer_size_kbytes = 90,
- .max_num_dpp = 4,
- .max_num_otg = 4,
- .max_num_hdmi_frl_outputs = 1,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dpte_buffer_size_in_pte_reqs_luma = 64,
- .dpte_buffer_size_in_pte_reqs_chroma = 34,
- .dispclk_ramp_margin_percent = 1,
- .max_inter_dcn_tile_repeaters = 8,
- .cursor_buffer_size = 16,
- .cursor_chunk_size = 2,
- .writeback_line_buffer_buffer_size = 0,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .dppclk_delay_subtotal = 46,
- .dppclk_delay_scl = 50,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dynamic_metadata_vm_enabled = false,
- .odm_combine_4to1_supported = false,
- .dcc_supported = true,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
- /*TODO: correct dispclk/dppclk voltage level determination*/
- .clock_limits = {
- {
- .state = 0,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 600.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 186.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 1,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 2,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 3,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 371.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 4,
- .dispclk_mhz = 1200.0,
- .dppclk_mhz = 1200.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 625.0,
- },
- },
- .num_states = 5,
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 442.0,
- .sr_enter_plus_exit_z8_time_us = 560.0,
- .writeback_latency_us = 12.0,
- .dram_channel_width_bytes = 4,
- .round_trip_ping_latency_dcfclk_cycles = 106,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 60.0,
- .fabric_datapath_to_dcn_data_return_bytes = 32,
- .return_bus_width_bytes = 64,
- .downspread_percent = 0.38,
- .dcn_downspread_percent = 0.5,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = false,
- .urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
-};
-
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
DCN31_CLK_SRC_PLL1,
@@ -1030,6 +885,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
+ .disable_z10 = true,
.optimize_edp_link_rate = true,
.enable_sw_cntl_psr = true,
.apply_vendor_specific_lttpr_wa = true,
@@ -1812,7 +1668,6 @@ int dcn31_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.immediate_flip = true;
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;
pipes[pipe_cnt].pipe.src.gpuvm = true;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
@@ -1869,143 +1724,6 @@ void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
}
}
-static void dcn31_calculate_wm_and_dlg_fp(
- struct dc *dc, struct dc_state *context,
- display_e2e_pipe_params_st *pipes,
- int pipe_cnt,
- int vlevel)
-{
- int i, pipe_idx;
- double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
-
- if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
- dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
-
- /* We don't recalculate clocks for 0 pipe configs, which can block
- * S0i3 as high clocks will block low power states
- * Override any clocks that can block S0i3 to min here
- */
- if (pipe_cnt == 0) {
- context->bw_ctx.bw.dcn.clk.dcfclk_khz = dcfclk; // always should be vlevel 0
- return;
- }
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
- pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
-
-#if 0 // TODO
- /* Set B:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
- if (vlevel == 0) {
- pipes[0].clks_cfg.voltage = 1;
- pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
- }
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- pipes[0].clks_cfg.voltage = vlevel;
- pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
-
- /* Set C:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
- /* Set D:
- * TODO
- */
- if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
- context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
- context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
- context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
- }
- context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
-
- /* Set A:
- * All clocks min required
- *
- * Set A calculated last so that following calculations are based on Set A
- */
- dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
- context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
- /* TODO: remove: */
- context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
- context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
- context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
- /* end remove*/
-
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
-
- pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
- pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
-
- if (dc->config.forced_clocks || dc->debug.max_disp_clk) {
- pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
- pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
- }
- if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
- if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
- pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
-
- pipe_idx++;
- }
-
- DC_FP_START();
- dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
- DC_FP_END();
-}
-
void dcn31_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -2075,77 +1793,6 @@ static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
-void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
-{
- struct clk_limit_table *clk_table = &bw_params->clk_table;
- struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- unsigned int i, closest_clk_lvl;
- int j;
-
- // Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
-
- dcn3_1_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
- dcn3_1_ip.max_num_dpp = dc->res_pool->pipe_count;
- dcn3_1_soc.num_chans = bw_params->num_channels;
-
- ASSERT(clk_table->num_entries);
-
- /* Prepass to find max clocks independent of voltage level. */
- for (i = 0; i < clk_table->num_entries; ++i) {
- if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
- if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
- }
-
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_1_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
-
- clock_limits[i].state = i;
-
- /* Clocks dependent on voltage level. */
- clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
-
- /* Clocks independent of voltage level. */
- clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
- dcn3_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-
- clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
- dcn3_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-
- clock_limits[i].dram_bw_per_chan_gbps = dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- clock_limits[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- clock_limits[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- clock_limits[i].phyclk_d18_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- clock_limits[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- for (i = 0; i < clk_table->num_entries; i++)
- dcn3_1_soc.clock_limits[i] = clock_limits[i];
- if (clk_table->num_entries) {
- dcn3_1_soc.num_states = clk_table->num_entries;
- }
- }
-
- dcn3_1_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31);
- else
- dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31_FPGA);
-}
-
static struct resource_funcs dcn31_res_pool_funcs = {
.destroy = dcn31_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
@@ -2223,9 +1870,9 @@ static bool dcn31_resource_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.max_slave_planes = 1;
- dc->caps.max_slave_yuv_planes = 1;
- dc->caps.max_slave_rgb_planes = 1;
+ dc->caps.max_slave_planes = 2;
+ dc->caps.max_slave_yuv_planes = 2;
+ dc->caps.max_slave_rgb_planes = 2;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.dp_hpo = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
index 4b7ab21ea15b..1ce6509c1ed1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
@@ -31,6 +31,9 @@
#define TO_DCN31_RES_POOL(pool)\
container_of(pool, struct dcn31_resource_pool, base)
+extern struct _vcs_dpi_ip_params_st dcn3_1_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc;
+
struct dcn31_resource_pool {
struct resource_pool base;
};
@@ -47,7 +50,6 @@ int dcn31_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate);
-void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
struct resource_pool *dcn31_create_resource_pool(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/Makefile b/drivers/gpu/drm/amd/display/dc/dcn315/Makefile
index c831ad46e81c..59381d24800b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/Makefile
@@ -25,32 +25,6 @@
DCN315 = dcn315_resource.o
-ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn315/dcn315_resource.o := -msse
-endif
-
-ifdef CONFIG_PPC64
-CFLAGS_$(AMDDALPATH)/dc/dcn315/dcn315_resource.o := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-CFLAGS_$(AMDDALPATH)/dc/dcn315/dcn315_resource.o += -mhard-float
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-CFLAGS_$(AMDDALPATH)/dc/dcn315/dcn315_resource.o += -mpreferred-stack-boundary=4
-else
-CFLAGS_$(AMDDALPATH)/dc/dcn315/dcn315_resource.o += -msse2
-endif
-endif
-
AMD_DAL_DCN315 = $(addprefix $(AMDDALPATH)/dc/dcn315/,$(DCN315))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN315)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
index 06adb77c206b..e6f9312e3a48 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
@@ -66,6 +66,7 @@
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
+#include "dml/dcn31/dcn31_fpu.h"
#include "dcn31/dcn31_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "dcn31/dcn31_panel_cntl.h"
@@ -133,158 +134,9 @@
#include "link_enc_cfg.h"
-#define DC_LOGGER_INIT(logger)
-
-#define DCN3_15_DEFAULT_DET_SIZE 192
#define DCN3_15_MAX_DET_SIZE 384
-#define DCN3_15_MIN_COMPBUF_SIZE_KB 128
#define DCN3_15_CRB_SEGMENT_SIZE_KB 64
-struct _vcs_dpi_ip_params_st dcn3_15_ip = {
- .gpuvm_enable = 1,
- .gpuvm_max_page_table_levels = 1,
- .hostvm_enable = 1,
- .hostvm_max_page_table_levels = 2,
- .rob_buffer_size_kbytes = 64,
- .det_buffer_size_kbytes = DCN3_15_DEFAULT_DET_SIZE,
- .min_comp_buffer_size_kbytes = DCN3_15_MIN_COMPBUF_SIZE_KB,
- .config_return_buffer_size_in_kbytes = 1024,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .meta_fifo_size_in_kentries = 32,
- .zero_size_buffer_entries = 512,
- .compbuf_reserved_space_64b = 256,
- .compbuf_reserved_space_zs = 64,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .meta_chunk_size_kbytes = 2,
- .min_meta_chunk_size_bytes = 256,
- .writeback_chunk_size_kbytes = 8,
- .ptoi_supported = false,
- .num_dsc = 3,
- .maximum_dsc_bits_per_component = 10,
- .dsc422_native_support = false,
- .is_line_buffer_bpp_fixed = true,
- .line_buffer_fixed_bpp = 49,
- .line_buffer_size_bits = 789504,
- .max_line_buffer_lines = 12,
- .writeback_interface_buffer_size_kbytes = 90,
- .max_num_dpp = 4,
- .max_num_otg = 4,
- .max_num_hdmi_frl_outputs = 1,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dpte_buffer_size_in_pte_reqs_luma = 64,
- .dpte_buffer_size_in_pte_reqs_chroma = 34,
- .dispclk_ramp_margin_percent = 1,
- .max_inter_dcn_tile_repeaters = 9,
- .cursor_buffer_size = 16,
- .cursor_chunk_size = 2,
- .writeback_line_buffer_buffer_size = 0,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .dppclk_delay_subtotal = 46,
- .dppclk_delay_scl = 50,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dynamic_metadata_vm_enabled = false,
- .odm_combine_4to1_supported = false,
- .dcc_supported = true,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
- /*TODO: correct dispclk/dppclk voltage level determination*/
- .clock_limits = {
- {
- .state = 0,
- .dispclk_mhz = 1372.0,
- .dppclk_mhz = 1372.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 600.0,
- },
- {
- .state = 1,
- .dispclk_mhz = 1372.0,
- .dppclk_mhz = 1372.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 600.0,
- },
- {
- .state = 2,
- .dispclk_mhz = 1372.0,
- .dppclk_mhz = 1372.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 600.0,
- },
- {
- .state = 3,
- .dispclk_mhz = 1372.0,
- .dppclk_mhz = 1372.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 600.0,
- },
- {
- .state = 4,
- .dispclk_mhz = 1372.0,
- .dppclk_mhz = 1372.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 600.0,
- },
- },
- .num_states = 5,
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 50.0,
- .sr_enter_plus_exit_z8_time_us = 50.0,
- .writeback_latency_us = 12.0,
- .dram_channel_width_bytes = 4,
- .round_trip_ping_latency_dcfclk_cycles = 106,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 60.0,
- .fabric_datapath_to_dcn_data_return_bytes = 32,
- .return_bus_width_bytes = 64,
- .downspread_percent = 0.38,
- .dcn_downspread_percent = 0.38,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = false,
- .urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
-};
-
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
DCN31_CLK_SRC_PLL1,
@@ -1859,88 +1711,6 @@ static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
-static void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
-{
- struct clk_limit_table *clk_table = &bw_params->clk_table;
- struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- unsigned int i, closest_clk_lvl;
- int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
- int j;
-
- // Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
-
- dcn3_15_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
- dcn3_15_ip.max_num_dpp = dc->res_pool->pipe_count;
- dcn3_15_soc.num_chans = bw_params->num_channels;
-
- ASSERT(clk_table->num_entries);
-
- /* Prepass to find max clocks independent of voltage level. */
- for (i = 0; i < clk_table->num_entries; ++i) {
- if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
- if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
- }
-
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_15_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_15_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
- if (clk_table->num_entries == 1) {
- /*smu gives one DPM level, let's take the highest one*/
- closest_clk_lvl = dcn3_15_soc.num_states - 1;
- }
-
- clock_limits[i].state = i;
-
- /* Clocks dependent on voltage level. */
- clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- if (clk_table->num_entries == 1 &&
- clock_limits[i].dcfclk_mhz < dcn3_15_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
- /*SMU fix not released yet*/
- clock_limits[i].dcfclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
- }
- clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
-
- /* Clocks independent of voltage level. */
- clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
- dcn3_15_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-
- clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
- dcn3_15_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-
- clock_limits[i].dram_bw_per_chan_gbps = dcn3_15_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- clock_limits[i].dscclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- clock_limits[i].dtbclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- clock_limits[i].phyclk_d18_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- clock_limits[i].phyclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- for (i = 0; i < clk_table->num_entries; i++)
- dcn3_15_soc.clock_limits[i] = clock_limits[i];
- if (clk_table->num_entries) {
- dcn3_15_soc.num_states = clk_table->num_entries;
- }
- }
-
- if (max_dispclk_mhz) {
- dcn3_15_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- }
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31);
- else
- dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31_FPGA);
-}
-
static struct resource_funcs dcn315_res_pool_funcs = {
.destroy = dcn315_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
@@ -1988,11 +1758,10 @@ static bool dcn315_resource_construct(
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
- dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by default*/
+ dc->caps.i2c_speed_in_khz_hdcp = 100;
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
-
dc->caps.max_slave_planes = 1;
dc->caps.max_slave_yuv_planes = 1;
dc->caps.max_slave_rgb_planes = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
index f3a36820a31f..39929fa67a51 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
@@ -31,6 +31,9 @@
#define TO_DCN315_RES_POOL(pool)\
container_of(pool, struct dcn315_resource_pool, base)
+extern struct _vcs_dpi_ip_params_st dcn3_15_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc;
+
struct dcn315_resource_pool {
struct resource_pool base;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/Makefile b/drivers/gpu/drm/amd/display/dc/dcn316/Makefile
index cd87b687c5e2..819d44a9439b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/Makefile
@@ -25,32 +25,6 @@
DCN316 = dcn316_resource.o
-ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn316/dcn316_resource.o := -msse
-endif
-
-ifdef CONFIG_PPC64
-CFLAGS_$(AMDDALPATH)/dc/dcn316/dcn316_resource.o := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-CFLAGS_$(AMDDALPATH)/dc/dcn316/dcn316_resource.o += -mhard-float
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-CFLAGS_$(AMDDALPATH)/dc/dcn316/dcn316_resource.o += -mpreferred-stack-boundary=4
-else
-CFLAGS_$(AMDDALPATH)/dc/dcn316/dcn316_resource.o += -msse2
-endif
-endif
-
AMD_DAL_DCN316 = $(addprefix $(AMDDALPATH)/dc/dcn316/,$(DCN316))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN316)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index 8decc3ccf8ca..d5c195749a81 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -66,6 +66,7 @@
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
+#include "dml/dcn31/dcn31_fpu.h"
#include "dcn31/dcn31_dccg.h"
#include "dcn10/dcn10_resource.h"
#include "dcn31/dcn31_panel_cntl.h"
@@ -123,157 +124,10 @@
#include "link_enc_cfg.h"
-#define DC_LOGGER_INIT(logger)
-
-#define DCN3_16_DEFAULT_DET_SIZE 192
#define DCN3_16_MAX_DET_SIZE 384
#define DCN3_16_MIN_COMPBUF_SIZE_KB 128
#define DCN3_16_CRB_SEGMENT_SIZE_KB 64
-struct _vcs_dpi_ip_params_st dcn3_16_ip = {
- .gpuvm_enable = 1,
- .gpuvm_max_page_table_levels = 1,
- .hostvm_enable = 1,
- .hostvm_max_page_table_levels = 2,
- .rob_buffer_size_kbytes = 64,
- .det_buffer_size_kbytes = DCN3_16_DEFAULT_DET_SIZE,
- .config_return_buffer_size_in_kbytes = 1024,
- .compressed_buffer_segment_size_in_kbytes = 64,
- .meta_fifo_size_in_kentries = 32,
- .zero_size_buffer_entries = 512,
- .compbuf_reserved_space_64b = 256,
- .compbuf_reserved_space_zs = 64,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .meta_chunk_size_kbytes = 2,
- .min_meta_chunk_size_bytes = 256,
- .writeback_chunk_size_kbytes = 8,
- .ptoi_supported = false,
- .num_dsc = 3,
- .maximum_dsc_bits_per_component = 10,
- .dsc422_native_support = false,
- .is_line_buffer_bpp_fixed = true,
- .line_buffer_fixed_bpp = 48,
- .line_buffer_size_bits = 789504,
- .max_line_buffer_lines = 12,
- .writeback_interface_buffer_size_kbytes = 90,
- .max_num_dpp = 4,
- .max_num_otg = 4,
- .max_num_hdmi_frl_outputs = 1,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dpte_buffer_size_in_pte_reqs_luma = 64,
- .dpte_buffer_size_in_pte_reqs_chroma = 34,
- .dispclk_ramp_margin_percent = 1,
- .max_inter_dcn_tile_repeaters = 8,
- .cursor_buffer_size = 16,
- .cursor_chunk_size = 2,
- .writeback_line_buffer_buffer_size = 0,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .dppclk_delay_subtotal = 46,
- .dppclk_delay_scl = 50,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dynamic_metadata_vm_enabled = false,
- .odm_combine_4to1_supported = false,
- .dcc_supported = true,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
- /*TODO: correct dispclk/dppclk voltage level determination*/
- .clock_limits = {
- {
- .state = 0,
- .dispclk_mhz = 556.0,
- .dppclk_mhz = 556.0,
- .phyclk_mhz = 600.0,
- .phyclk_d18_mhz = 445.0,
- .dscclk_mhz = 186.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 1,
- .dispclk_mhz = 625.0,
- .dppclk_mhz = 625.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 2,
- .dispclk_mhz = 625.0,
- .dppclk_mhz = 625.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 209.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 3,
- .dispclk_mhz = 1112.0,
- .dppclk_mhz = 1112.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 371.0,
- .dtbclk_mhz = 625.0,
- },
- {
- .state = 4,
- .dispclk_mhz = 1250.0,
- .dppclk_mhz = 1250.0,
- .phyclk_mhz = 810.0,
- .phyclk_d18_mhz = 667.0,
- .dscclk_mhz = 417.0,
- .dtbclk_mhz = 625.0,
- },
- },
- .num_states = 5,
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 442.0,
- .sr_enter_plus_exit_z8_time_us = 560.0,
- .writeback_latency_us = 12.0,
- .dram_channel_width_bytes = 4,
- .round_trip_ping_latency_dcfclk_cycles = 106,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 60.0,
- .fabric_datapath_to_dcn_data_return_bytes = 32,
- .return_bus_width_bytes = 64,
- .downspread_percent = 0.38,
- .dcn_downspread_percent = 0.5,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .do_urgent_latency_adjustment = false,
- .urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
-};
-
enum dcn31_clk_src_array_id {
DCN31_CLK_SRC_PLL0,
DCN31_CLK_SRC_PLL1,
@@ -1859,89 +1713,6 @@ static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
-static void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
-{
- struct clk_limit_table *clk_table = &bw_params->clk_table;
- struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- unsigned int i, closest_clk_lvl;
- int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
- int j;
-
- // Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
-
- dcn3_16_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
- dcn3_16_ip.max_num_dpp = dc->res_pool->pipe_count;
- dcn3_16_soc.num_chans = bw_params->num_channels;
-
- ASSERT(clk_table->num_entries);
-
- /* Prepass to find max clocks independent of voltage level. */
- for (i = 0; i < clk_table->num_entries; ++i) {
- if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
- max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
- if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
- max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
- }
-
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
- // Ported from DCN315
- if (clk_table->num_entries == 1) {
- /*smu gives one DPM level, let's take the highest one*/
- closest_clk_lvl = dcn3_16_soc.num_states - 1;
- }
-
- clock_limits[i].state = i;
-
- /* Clocks dependent on voltage level. */
- clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- if (clk_table->num_entries == 1 &&
- clock_limits[i].dcfclk_mhz < dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
- /*SMU fix not released yet*/
- clock_limits[i].dcfclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
- }
- clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
-
- /* Clocks independent of voltage level. */
- clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
- dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-
- clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
- dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-
- clock_limits[i].dram_bw_per_chan_gbps = dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- clock_limits[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- clock_limits[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- clock_limits[i].phyclk_d18_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- clock_limits[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- for (i = 0; i < clk_table->num_entries; i++)
- dcn3_16_soc.clock_limits[i] = clock_limits[i];
- if (clk_table->num_entries) {
- dcn3_16_soc.num_states = clk_table->num_entries;
- }
- }
-
- if (max_dispclk_mhz) {
- dcn3_16_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
- }
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31);
- else
- dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31_FPGA);
-}
-
static struct resource_funcs dcn316_res_pool_funcs = {
.destroy = dcn316_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
@@ -1989,11 +1760,10 @@ static bool dcn316_resource_construct(
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
- dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.4 w/a applied by default*/
+ dc->caps.i2c_speed_in_khz_hdcp = 100;
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
-
dc->caps.max_slave_planes = 1;
dc->caps.max_slave_yuv_planes = 1;
dc->caps.max_slave_rgb_planes = 1;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
index 9d0d60cb9482..0dc5a6c13ae7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
@@ -31,6 +31,9 @@
#define TO_DCN316_RES_POOL(pool)\
container_of(pool, struct dcn316_resource_pool, base)
+extern struct _vcs_dpi_ip_params_st dcn3_16_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc;
+
struct dcn316_resource_pool {
struct resource_pool base;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 28978ce62f87..ee911452c048 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -71,6 +71,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn303/dcn303_fpu.o := $(dml_ccflags)
@@ -114,6 +115,7 @@ DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
+DML += dcn31/dcn31_fpu.o
DML += dcn301/dcn301_fpu.o
DML += dcn302/dcn302_fpu.o
DML += dcn303/dcn303_fpu.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index e447c74be713..db3b16b77034 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -639,7 +639,6 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)
{
bool updated = false;
- DC_FP_START();
if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns
&& dc->debug.sr_exit_time_ns) {
updated = true;
@@ -675,7 +674,6 @@ static bool dcn_bw_apply_registry_override(struct dc *dc)
dc->dcn_soc->dram_clock_change_latency =
dc->debug.dram_clock_change_latency_ns / 1000.0;
}
- DC_FP_END();
return updated;
}
@@ -764,7 +762,7 @@ static unsigned int get_highest_allowed_voltage_level(uint32_t chip_family,
return 4;
}
-bool dcn10_validate_bandwidth(
+bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -790,7 +788,6 @@ bool dcn10_validate_bandwidth(
dcn_bw_sync_calcs_and_dml(dc);
memset(v, 0, sizeof(*v));
- DC_FP_START();
v->sr_exit_time = dc->dcn_soc->sr_exit_time;
v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time;
@@ -1323,8 +1320,6 @@ bool dcn10_validate_bandwidth(
bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9;
bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit;
- DC_FP_END();
-
PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH();
@@ -1495,8 +1490,6 @@ void dcn_bw_update_from_pplib(struct dc *dc)
res = dm_pp_get_clock_levels_by_type_with_voltage(
ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
- DC_FP_START();
-
if (res)
res = verify_clock_values(&fclks);
@@ -1526,13 +1519,9 @@ void dcn_bw_update_from_pplib(struct dc *dc)
} else
BREAK_TO_DEBUGGER();
- DC_FP_END();
-
res = dm_pp_get_clock_levels_by_type_with_voltage(
ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
- DC_FP_START();
-
if (res)
res = verify_clock_values(&dcfclks);
@@ -1543,8 +1532,6 @@ void dcn_bw_update_from_pplib(struct dc *dc)
dc->dcn_soc->dcfclkv_max0p9 = dcfclks.data[dcfclks.num_levels - 1].clocks_in_khz / 1000.0;
} else
BREAK_TO_DEBUGGER();
-
- DC_FP_END();
}
void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
@@ -1559,11 +1546,9 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
if (!pp || !pp->set_wm_ranges)
return;
- DC_FP_START();
min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
socclk_khz = dc->dcn_soc->socclk * 1000;
- DC_FP_END();
/* Now notify PPLib/SMU about which Watermarks sets they should select
* depending on DPM state they are in. And update BW MGR GFX Engine and
@@ -1614,7 +1599,6 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
void dcn_bw_sync_calcs_and_dml(struct dc *dc)
{
- DC_FP_START();
DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n"
"sr_enter_plus_exit_time: %f ns\n"
"urgent_latency: %f ns\n"
@@ -1803,5 +1787,4 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
dc->dml.ip.bug_forcing_LC_req_same_size_fixed =
dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes;
dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency;
- DC_FP_END();
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index f93af45aeab4..f79dd40f8d81 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1302,9 +1302,7 @@ int dcn20_populate_dml_pipes_from_context(
}
/* populate writeback information */
- DC_FP_START();
dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);
- DC_FP_END();
return pipe_cnt;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
new file mode 100644
index 000000000000..a0a2e125c9c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -0,0 +1,863 @@
+/*
+ * Copyright 2019-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "resource.h"
+#include "clk_mgr.h"
+
+#include "dml/dcn20/dcn20_fpu.h"
+#include "dcn31_fpu.h"
+
+/**
+ * DOC: DCN31x FPU manipulation Overview
+ *
+ * The DCN architecture relies on FPU operations, which require special
+ * compilation flags and the use of kernel_fpu_begin/end functions; ideally, we
+ * want to avoid spreading FPU access across multiple files. With this idea in
+ * mind, this file aims to centralize all DCN3.1.x functions that require FPU
+ * access in a single place. Code in this file follows the following code
+ * pattern:
+ *
+ * 1. Functions that use FPU operations should be isolated in static functions.
+ * 2. The FPU functions should have the noinline attribute to ensure anything
+ * that deals with FP register is contained within this call.
+ * 3. All function that needs to be accessed outside this file requires a
+ * public interface that not uses any FPU reference.
+ * 4. Developers **must not** use DC_FP_START/END in this file, but they need
+ * to ensure that the caller invokes it before access any function available
+ * in this file. For this reason, public functions in this file must invoke
+ * dc_assert_fp_enabled();
+ */
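+
+/* A minimal sketch of the pattern described above (illustrative only, not a
+ * definitive call chain): a caller built without FPU flags, such as
+ * dcn31_calculate_wm_and_dlg() in dcn31_resource.c, is expected to do
+ *
+ *	DC_FP_START();
+ *	dcn31_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
+ *	DC_FP_END();
+ *
+ * while dcn31_calculate_wm_and_dlg_fp() below, built with $(dml_ccflags) in
+ * the DML Makefile, opens with dc_assert_fp_enabled() before any FP math.
+ */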
+
+struct _vcs_dpi_ip_params_st dcn3_1_ip = {
+ .gpuvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_enable = 1,
+ .hostvm_max_page_table_levels = 2,
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = DCN3_1_DEFAULT_DET_SIZE,
+ .config_return_buffer_size_in_kbytes = 1792,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_64b = 256,
+ .compbuf_reserved_space_zs = 64,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+ .writeback_chunk_size_kbytes = 8,
+ .ptoi_supported = false,
+ .num_dsc = 3,
+ .maximum_dsc_bits_per_component = 10,
+ .dsc422_native_support = false,
+ .is_line_buffer_bpp_fixed = true,
+ .line_buffer_fixed_bpp = 48,
+ .line_buffer_size_bits = 789504,
+ .max_line_buffer_lines = 12,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dpte_buffer_size_in_pte_reqs_luma = 64,
+ .dpte_buffer_size_in_pte_reqs_chroma = 34,
+ .dispclk_ramp_margin_percent = 1,
+ .max_inter_dcn_tile_repeaters = 8,
+ .cursor_buffer_size = 16,
+ .cursor_chunk_size = 2,
+ .writeback_line_buffer_buffer_size = 0,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .dppclk_delay_subtotal = 46,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dynamic_metadata_vm_enabled = false,
+ .odm_combine_4to1_supported = false,
+ .dcc_supported = true,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
+ /*TODO: correct dispclk/dppclk voltage level determination*/
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 600.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 186.0,
+ .dtbclk_mhz = 625.0,
+ },
+ {
+ .state = 1,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 625.0,
+ },
+ {
+ .state = 2,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 625.0,
+ },
+ {
+ .state = 3,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 371.0,
+ .dtbclk_mhz = 625.0,
+ },
+ {
+ .state = 4,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 417.0,
+ .dtbclk_mhz = 625.0,
+ },
+ },
+ .num_states = 5,
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_z8_time_us = 442.0,
+ .sr_enter_plus_exit_z8_time_us = 560.0,
+ .writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
+ .round_trip_ping_latency_dcfclk_cycles = 106,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_sdp_bw_after_urgent = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.5,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+struct _vcs_dpi_ip_params_st dcn3_15_ip = {
+ .gpuvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_enable = 1,
+ .hostvm_max_page_table_levels = 2,
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = DCN3_15_DEFAULT_DET_SIZE,
+ .min_comp_buffer_size_kbytes = DCN3_15_MIN_COMPBUF_SIZE_KB,
+ .config_return_buffer_size_in_kbytes = 1024,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_64b = 256,
+ .compbuf_reserved_space_zs = 64,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+ .writeback_chunk_size_kbytes = 8,
+ .ptoi_supported = false,
+ .num_dsc = 3,
+ .maximum_dsc_bits_per_component = 10,
+ .dsc422_native_support = false,
+ .is_line_buffer_bpp_fixed = true,
+ .line_buffer_fixed_bpp = 49,
+ .line_buffer_size_bits = 789504,
+ .max_line_buffer_lines = 12,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dpte_buffer_size_in_pte_reqs_luma = 64,
+ .dpte_buffer_size_in_pte_reqs_chroma = 34,
+ .dispclk_ramp_margin_percent = 1,
+ .max_inter_dcn_tile_repeaters = 9,
+ .cursor_buffer_size = 16,
+ .cursor_chunk_size = 2,
+ .writeback_line_buffer_buffer_size = 0,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .dppclk_delay_subtotal = 46,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dynamic_metadata_vm_enabled = false,
+ .odm_combine_4to1_supported = false,
+ .dcc_supported = true,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
+ /*TODO: correct dispclk/dppclk voltage level determination*/
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 1372.0,
+ .dppclk_mhz = 1372.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 417.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 1,
+ .dispclk_mhz = 1372.0,
+ .dppclk_mhz = 1372.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 417.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 2,
+ .dispclk_mhz = 1372.0,
+ .dppclk_mhz = 1372.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 417.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 3,
+ .dispclk_mhz = 1372.0,
+ .dppclk_mhz = 1372.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 417.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 4,
+ .dispclk_mhz = 1372.0,
+ .dppclk_mhz = 1372.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 417.0,
+ .dtbclk_mhz = 600.0,
+ },
+ },
+ .num_states = 5,
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_z8_time_us = 50.0,
+ .sr_enter_plus_exit_z8_time_us = 50.0,
+ .writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
+ .round_trip_ping_latency_dcfclk_cycles = 106,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_sdp_bw_after_urgent = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.38,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+struct _vcs_dpi_ip_params_st dcn3_16_ip = {
+ .gpuvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_enable = 1,
+ .hostvm_max_page_table_levels = 2,
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = DCN3_16_DEFAULT_DET_SIZE,
+ .config_return_buffer_size_in_kbytes = 1024,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_64b = 256,
+ .compbuf_reserved_space_zs = 64,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+ .writeback_chunk_size_kbytes = 8,
+ .ptoi_supported = false,
+ .num_dsc = 3,
+ .maximum_dsc_bits_per_component = 10,
+ .dsc422_native_support = false,
+ .is_line_buffer_bpp_fixed = true,
+ .line_buffer_fixed_bpp = 48,
+ .line_buffer_size_bits = 789504,
+ .max_line_buffer_lines = 12,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dpte_buffer_size_in_pte_reqs_luma = 64,
+ .dpte_buffer_size_in_pte_reqs_chroma = 34,
+ .dispclk_ramp_margin_percent = 1,
+ .max_inter_dcn_tile_repeaters = 8,
+ .cursor_buffer_size = 16,
+ .cursor_chunk_size = 2,
+ .writeback_line_buffer_buffer_size = 0,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .dppclk_delay_subtotal = 46,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dynamic_metadata_vm_enabled = false,
+ .odm_combine_4to1_supported = false,
+ .dcc_supported = true,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
+ /*TODO: correct dispclk/dppclk voltage level determination*/
+ .clock_limits = {
+ {
+ .state = 0,
+ .dispclk_mhz = 556.0,
+ .dppclk_mhz = 556.0,
+ .phyclk_mhz = 600.0,
+ .phyclk_d18_mhz = 445.0,
+ .dscclk_mhz = 186.0,
+ .dtbclk_mhz = 625.0,
+ },
+ {
+ .state = 1,
+ .dispclk_mhz = 625.0,
+ .dppclk_mhz = 625.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 625.0,
+ },
+ {
+ .state = 2,
+ .dispclk_mhz = 625.0,
+ .dppclk_mhz = 625.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 209.0,
+ .dtbclk_mhz = 625.0,
+ },
+ {
+ .state = 3,
+ .dispclk_mhz = 1112.0,
+ .dppclk_mhz = 1112.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 371.0,
+ .dtbclk_mhz = 625.0,
+ },
+ {
+ .state = 4,
+ .dispclk_mhz = 1250.0,
+ .dppclk_mhz = 1250.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 417.0,
+ .dtbclk_mhz = 625.0,
+ },
+ },
+ .num_states = 5,
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .sr_exit_z8_time_us = 442.0,
+ .sr_enter_plus_exit_z8_time_us = 560.0,
+ .writeback_latency_us = 12.0,
+ .dram_channel_width_bytes = 4,
+ .round_trip_ping_latency_dcfclk_cycles = 106,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_sdp_bw_after_urgent = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.5,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+void dcn31_calculate_wm_and_dlg_fp(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ int i, pipe_idx;
+ double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+
+ dc_assert_fp_enabled();
+
+ if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
+ dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
+
+ /* We don't recalculate clocks for 0 pipe configs, which can block
+	 * S0i3, as high clocks will block low power states.
+	 * Override any clocks that can block S0i3 to min here.
+ */
+ if (pipe_cnt == 0) {
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = dcfclk; // always should be vlevel 0
+ return;
+ }
+
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+ pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+
+#if 0 // TODO
+ /* Set B:
+ * TODO
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
+ if (vlevel == 0) {
+ pipes[0].clks_cfg.voltage = 1;
+ pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
+ }
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+
+ /* Set C:
+ * TODO
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ /* Set D:
+ * TODO
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+#endif
+
+ /* Set A:
+ * All clocks min required
+ *
+ * Set A is calculated last so that the following calculations are based on it
+ */
+ dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
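+ /* The DML helpers report latencies in microseconds; the * 1000 factors
+ * convert the time-based fields to the nanoseconds the watermark
+ * registers expect.
+ */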
+ /* TODO: remove: */
+ context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
+ /* end remove */
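+ /* Until the per-set programming above (#if 0) is enabled, sets B, C and
+ * D simply mirror set A so that all four watermark sets stay consistent.
+ */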
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+ if (dc->config.forced_clocks || dc->debug.max_disp_clk) {
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+ }
+ if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+ if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
+ pipe_idx++;
+ }
+
+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+}
+
+void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ unsigned int i, closest_clk_lvl;
+ int j;
+
+ dc_assert_fp_enabled();
+
+ // Default clock levels are used for diags, which may lead to overclocking.
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+ int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+
+ dcn3_1_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
+ dcn3_1_ip.max_num_dpp = dc->res_pool->pipe_count;
+ dcn3_1_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards */
+ for (closest_clk_lvl = 0, j = dcn3_1_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+
+ clock_limits[i].state = i;
+
+ /* Clocks dependent on voltage level. */
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
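+ /* For example, a hypothetical 800 MHz memclk with a wck_ratio of 2
+ * works out to 800 * 2 * 2 = 3200 MT/s.
+ */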
+
+ /* Clocks independent of voltage level. */
+ clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ clock_limits[i].dram_bw_per_chan_gbps = dcn3_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz = dcn3_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+ for (i = 0; i < clk_table->num_entries; i++)
+ dcn3_1_soc.clock_limits[i] = clock_limits[i];
+ if (clk_table->num_entries) {
+ dcn3_1_soc.num_states = clk_table->num_entries;
+ }
+ }
+
+ dcn3_1_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31);
+ else
+ dml_init_instance(&dc->dml, &dcn3_1_soc, &dcn3_1_ip, DML_PROJECT_DCN31_FPGA);
+}
+
+void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ unsigned int i, closest_clk_lvl;
+ int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+ int j;
+
+ dc_assert_fp_enabled();
+
+ // Default clock levels are used for diags, which may lead to overclocking.
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+
+ dcn3_15_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
+ dcn3_15_ip.max_num_dpp = dc->res_pool->pipe_count;
+ dcn3_15_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards */
+ for (closest_clk_lvl = 0, j = dcn3_15_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_15_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+ if (clk_table->num_entries == 1) {
+ /* SMU gives only one DPM level, so take the highest SoC clock level */
+ closest_clk_lvl = dcn3_15_soc.num_states - 1;
+ }
+
+ clock_limits[i].state = i;
+
+ /* Clocks dependent on voltage level. */
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ if (clk_table->num_entries == 1 &&
+ clock_limits[i].dcfclk_mhz < dcn3_15_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+ /* SMU fix not released yet */
+ clock_limits[i].dcfclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+ }
+ clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+
+ /* Clocks independent of voltage level. */
+ clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_15_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_15_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ clock_limits[i].dram_bw_per_chan_gbps = dcn3_15_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz = dcn3_15_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+ for (i = 0; i < clk_table->num_entries; i++)
+ dcn3_15_soc.clock_limits[i] = clock_limits[i];
+ if (clk_table->num_entries) {
+ dcn3_15_soc.num_states = clk_table->num_entries;
+ }
+ }
+
+ if (max_dispclk_mhz) {
+ dcn3_15_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ }
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31);
+ else
+ dml_init_instance(&dc->dml, &dcn3_15_soc, &dcn3_15_ip, DML_PROJECT_DCN31_FPGA);
+}
+
+void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ unsigned int i, closest_clk_lvl;
+ int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+ int j;
+
+ dc_assert_fp_enabled();
+
+ // Default clock levels are used for diags, which may lead to overclocking.
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+
+ dcn3_16_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
+ dcn3_16_ip.max_num_dpp = dc->res_pool->pipe_count;
+ dcn3_16_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards */
+ for (closest_clk_lvl = 0, j = dcn3_16_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_16_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+ // Ported from DCN315
+ if (clk_table->num_entries == 1) {
+ /* SMU gives only one DPM level, so take the highest SoC clock level */
+ closest_clk_lvl = dcn3_16_soc.num_states - 1;
+ }
+
+ clock_limits[i].state = i;
+
+ /* Clocks dependent on voltage level. */
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ if (clk_table->num_entries == 1 &&
+ clock_limits[i].dcfclk_mhz < dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+ /* SMU fix not released yet */
+ clock_limits[i].dcfclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+ }
+ clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2 * clk_table->entries[i].wck_ratio;
+
+ /* Clocks independent of voltage level. */
+ clock_limits[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ clock_limits[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_16_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ clock_limits[i].dram_bw_per_chan_gbps = dcn3_16_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz = dcn3_16_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+ for (i = 0; i < clk_table->num_entries; i++)
+ dcn3_16_soc.clock_limits[i] = clock_limits[i];
+ if (clk_table->num_entries) {
+ dcn3_16_soc.num_states = clk_table->num_entries;
+ }
+ }
+
+ if (max_dispclk_mhz) {
+ dcn3_16_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ }
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31);
+ else
+ dml_init_instance(&dc->dml, &dcn3_16_soc, &dcn3_16_ip, DML_PROJECT_DCN31_FPGA);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
new file mode 100644
index 000000000000..24ac19c83687
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2019-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN31_FPU_H__
+#define __DCN31_FPU_H__
+
+#define DCN3_1_DEFAULT_DET_SIZE 384
+#define DCN3_15_DEFAULT_DET_SIZE 192
+#define DCN3_15_MIN_COMPBUF_SIZE_KB 128
+#define DCN3_16_DEFAULT_DET_SIZE 192
+
+void dcn31_calculate_wm_and_dlg_fp(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
+
+void dcn31_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+void dcn315_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+
+#endif /* __DCN31_FPU_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 337c0161e72d..806f3041db14 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -619,7 +619,7 @@ struct dcn_ip_params {
};
extern const struct dcn_ip_params dcn10_ip_defaults;
-bool dcn10_validate_bandwidth(
+bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 59a704781e34..554d2e33bd7f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -310,6 +310,8 @@ struct timing_generator_funcs {
uint32_t slave_pixel_clock_100Hz,
uint8_t master_clock_divider,
uint8_t slave_clock_divider);
+
+ void (*init_odm)(struct timing_generator *tg);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
index 3b3090e3d327..e6c49ef8b584 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -37,9 +37,12 @@ struct dc_link;
struct link_resource;
struct pipe_ctx;
struct encoder_set_dp_phy_pattern_param;
+struct link_mst_stream_allocation_table;
struct link_hwss_ext {
- /* function pointers below require check for NULL at all time
+ /* function pointers below may need a NULL check if the caller
+ * considers a missing implementation expected in some cases, or not
+ * critical enough to investigate immediately
* *********************************************************************
*/
void (*set_hblank_min_symbol_width)(struct pipe_ctx *pipe_ctx,
@@ -62,6 +65,9 @@ struct link_hwss_ext {
const struct link_resource *link_res,
const struct dc_link_settings *link_settings,
const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
+ void (*update_stream_allocation_table)(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct link_mst_stream_allocation_table *table);
};
struct link_hwss {
@@ -72,6 +78,7 @@ struct link_hwss {
*/
void (*setup_stream_encoder)(struct pipe_ctx *pipe_ctx);
void (*reset_stream_encoder)(struct pipe_ctx *pipe_ctx);
+ void (*setup_stream_attribute)(struct pipe_ctx *pipe_ctx);
};
#endif /* __DC_LINK_HWSS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c b/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c
index e7047391934b..2c1a3bfcdb50 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c
@@ -144,3 +144,23 @@ unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link)
{
return link->dp_trace.link_loss_count;
}
+
+void dp_trace_set_edp_power_timestamp(struct dc_link *link,
+ bool power_up)
+{
+ if (!power_up)
+ /* save driver power-off timestamp */
+ link->dp_trace.edp_trace_power_timestamps.poweroff = dm_get_timestamp(link->dc->ctx);
+ else
+ link->dp_trace.edp_trace_power_timestamps.poweron = dm_get_timestamp(link->dc->ctx);
+}
+
+uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link)
+{
+ return link->dp_trace.edp_trace_power_timestamps.poweron;
+}
+
+uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link)
+{
+ return link->dp_trace.edp_trace_power_timestamps.poweroff;
+}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h
index 702f97c6ead0..26700e3cd65e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h
@@ -54,4 +54,9 @@ struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
bool in_detection);
unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
+void dp_trace_set_edp_power_timestamp(struct dc_link *link,
+ bool power_up);
+uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link);
+uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link);
+
#endif /* __LINK_DP_TRACE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
index 0f845113a6aa..776e822abcbb 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
@@ -62,6 +62,46 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
}
+void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
+{
+ struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+
+ if (!dc_is_virtual_signal(stream->signal))
+ stream_encoder->funcs->setup_stereo_sync(
+ stream_encoder,
+ pipe_ctx->stream_res.tg->inst,
+ stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE);
+
+ if (dc_is_dp_signal(stream->signal))
+ stream_encoder->funcs->dp_set_stream_attribute(
+ stream_encoder,
+ &stream->timing,
+ stream->output_color_space,
+ stream->use_vsc_sdp_for_colorimetry,
+ link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+ else if (dc_is_hdmi_tmds_signal(stream->signal))
+ stream_encoder->funcs->hdmi_set_stream_attribute(
+ stream_encoder,
+ &stream->timing,
+ stream->phy_pix_clk,
+ pipe_ctx->stream_res.audio != NULL);
+ else if (dc_is_dvi_signal(stream->signal))
+ stream_encoder->funcs->dvi_set_stream_attribute(
+ stream_encoder,
+ &stream->timing,
+ (stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK));
+ else if (dc_is_lvds_signal(stream->signal))
+ stream_encoder->funcs->lvds_set_stream_attribute(
+ stream_encoder,
+ &stream->timing);
+
+ if (dc_is_dp_signal(stream->signal))
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
+}
+
void enable_dio_dp_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal,
@@ -113,15 +153,27 @@ void set_dio_dp_lane_settings(struct dc_link *link,
link_enc->funcs->dp_set_lane_settings(link_enc, link_settings, lane_settings);
}
+static void update_dio_stream_allocation_table(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct link_mst_stream_allocation_table *table)
+{
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+
+ ASSERT(link_enc);
+ link_enc->funcs->update_mst_stream_allocation_table(link_enc, table);
+}
+
static const struct link_hwss dio_link_hwss = {
.setup_stream_encoder = setup_dio_stream_encoder,
.reset_stream_encoder = reset_dio_stream_encoder,
+ .setup_stream_attribute = setup_dio_stream_attribute,
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
.enable_dp_link_output = enable_dio_dp_link_output,
.disable_dp_link_output = disable_dio_dp_link_output,
.set_dp_link_test_pattern = set_dio_dp_link_test_pattern,
.set_dp_lane_settings = set_dio_dp_lane_settings,
+ .update_stream_allocation_table = update_dio_stream_allocation_table,
},
};
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
index 680df20b1fa3..08f22b32df48 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
@@ -34,6 +34,7 @@ void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
struct fixed31_32 throttled_vcp_size);
void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx);
void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx);
+void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx);
void enable_dio_dp_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
index 35b206225201..89d4e8159138 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
@@ -25,16 +25,44 @@
#include "link_hwss_dpia.h"
#include "core_types.h"
#include "link_hwss_dio.h"
+#include "link_enc_cfg.h"
+
+#define DC_LOGGER_INIT(logger)
+
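+/* DPIA links (DP tunneled over USB4) additionally program the MST slot
+ * count through DMUB, so this variant reports the totals to the firmware
+ * before updating the link encoder's allocation table.
+ */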
+static void update_dpia_stream_allocation_table(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct link_mst_stream_allocation_table *table)
+{
+ struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
+ enum dc_status status;
+ uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF;
+ int i;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ for (i = 0; i < table->stream_count; i++)
+ mst_alloc_slots += table->stream_allocations[i].slot_count;
+
+ status = dc_process_dmub_set_mst_slots(link->dc, link->link_index,
+ mst_alloc_slots, &prev_mst_slots_in_use);
+ ASSERT(status == DC_OK);
+ DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n",
+ status, mst_alloc_slots, prev_mst_slots_in_use);
+
+ ASSERT(link_enc);
+ link_enc->funcs->update_mst_stream_allocation_table(link_enc, table);
+}
static const struct link_hwss dpia_link_hwss = {
.setup_stream_encoder = setup_dio_stream_encoder,
.reset_stream_encoder = reset_dio_stream_encoder,
+ .setup_stream_attribute = setup_dio_stream_attribute,
.ext = {
.set_throttled_vcp_size = set_dio_throttled_vcp_size,
.enable_dp_link_output = enable_dio_dp_link_output,
.disable_dp_link_output = disable_dio_dp_link_output,
.set_dp_link_test_pattern = set_dio_dp_link_test_pattern,
.set_dp_lane_settings = set_dio_dp_lane_settings,
+ .update_stream_allocation_table = update_dpia_stream_allocation_table,
},
};
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
index 74919491675f..87972dc8443d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
@@ -131,6 +131,22 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst);
}
+static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
+{
+ struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+
+ stream_enc->funcs->set_stream_attribute(
+ stream_enc,
+ &stream->timing,
+ stream->output_color_space,
+ stream->use_vsc_sdp_for_colorimetry,
+ stream->timing.flags.DSC,
+ false);
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
+}
+
static void enable_hpo_dp_fpga_link_output(struct dc_link *link,
const struct link_resource *link_res,
enum signal_type signal,
@@ -228,9 +244,19 @@ static void set_hpo_dp_lane_settings(struct dc_link *link,
lane_settings[0].FFE_PRESET.raw);
}
+static void update_hpo_dp_stream_allocation_table(struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct link_mst_stream_allocation_table *table)
+{
+ link_res->hpo_dp_link_enc->funcs->update_stream_allocation_table(
+ link_res->hpo_dp_link_enc,
+ table);
+}
+
static const struct link_hwss hpo_dp_link_hwss = {
.setup_stream_encoder = setup_hpo_dp_stream_encoder,
.reset_stream_encoder = reset_hpo_dp_stream_encoder,
+ .setup_stream_attribute = setup_hpo_dp_stream_attribute,
.ext = {
.set_throttled_vcp_size = set_hpo_dp_throttled_vcp_size,
.set_hblank_min_symbol_width = set_hpo_dp_hblank_min_symbol_width,
@@ -238,6 +264,7 @@ static const struct link_hwss hpo_dp_link_hwss = {
.disable_dp_link_output = disable_hpo_dp_link_output,
.set_dp_link_test_pattern = set_hpo_dp_link_test_pattern,
.set_dp_lane_settings = set_hpo_dp_lane_settings,
+ .update_stream_allocation_table = update_hpo_dp_stream_allocation_table,
},
};
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c
index 9df273ca699b..4b5eccd994c4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c
@@ -26,9 +26,28 @@
#include "core_types.h"
#include "virtual/virtual_link_hwss.h"
+static void setup_hpo_frl_stream_attribute(struct pipe_ctx *pipe_ctx)
+{
+ struct hpo_frl_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_frl_stream_enc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct pipe_ctx *odm_pipe;
+ int odm_combine_num_segments = 1;
+
+ /* get number of ODM combine input segments */
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_combine_num_segments++;
+
+ stream_enc->funcs->hdmi_frl_set_stream_attribute(
+ stream_enc,
+ &stream->timing,
+ &stream->link->frl_link_settings.borrow_params,
+ odm_combine_num_segments);
+}
+
static const struct link_hwss hpo_frl_link_hwss = {
.setup_stream_encoder = virtual_setup_stream_encoder,
.reset_stream_encoder = virtual_reset_stream_encoder,
+ .setup_stream_attribute = setup_hpo_frl_stream_attribute,
};
bool can_use_hpo_frl_link_hwss(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
index 525eba2a3354..501173ce270e 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c
@@ -29,12 +29,17 @@ void virtual_setup_stream_encoder(struct pipe_ctx *pipe_ctx)
{
}
+void virtual_setup_stream_attribute(struct pipe_ctx *pipe_ctx)
+{
+}
+
void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx)
{
}
static const struct link_hwss virtual_link_hwss = {
.setup_stream_encoder = virtual_setup_stream_encoder,
.reset_stream_encoder = virtual_reset_stream_encoder,
+ .setup_stream_attribute = virtual_setup_stream_attribute,
};
const struct link_hwss *get_virtual_link_hwss(void)
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h
index e6bcb4bb0f3a..fbcbc5afb47d 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h
@@ -28,6 +28,7 @@
#include "core_types.h"
void virtual_setup_stream_encoder(struct pipe_ctx *pipe_ctx);
+void virtual_setup_stream_attribute(struct pipe_ctx *pipe_ctx);
void virtual_reset_stream_encoder(struct pipe_ctx *pipe_ctx);
const struct link_hwss *get_virtual_link_hwss(void);
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 71214c7a60fc..05c8d91ad4ab 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -44,24 +44,6 @@
#endif // defined(_TEST_HARNESS) || defined(FPGA_USB4)
-/* Firmware versioning. */
-#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x929554ba
-#define DMUB_FW_VERSION_MAJOR 0
-#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 108
-#define DMUB_FW_VERSION_TEST 0
-#define DMUB_FW_VERSION_VBIOS 0
-#define DMUB_FW_VERSION_HOTFIX 0
-#define DMUB_FW_VERSION_UCODE (((DMUB_FW_VERSION_MAJOR & 0xFF) << 24) | \
- ((DMUB_FW_VERSION_MINOR & 0xFF) << 16) | \
- ((DMUB_FW_VERSION_REVISION & 0xFF) << 8) | \
- ((DMUB_FW_VERSION_TEST & 0x1) << 7) | \
- ((DMUB_FW_VERSION_VBIOS & 0x1) << 6) | \
- (DMUB_FW_VERSION_HOTFIX & 0x3F))
-
-#endif
-
//<DMUB_TYPES>==================================================================
/* Basic type definitions. */
@@ -1523,8 +1505,6 @@ enum dmub_phy_fsm_state {
DMUB_PHY_FSM_FAST_LP,
};
-
-
/**
* Data passed from driver to FW in a DMUB_CMD__PSR_COPY_SETTINGS command.
*/
@@ -1704,9 +1684,16 @@ struct dmub_rb_cmd_psr_enable_data {
*/
uint8_t panel_inst;
/**
- * Explicit padding to 4 byte boundary.
+ * Phy state to enter.
+ * Values to use are defined in dmub_phy_fsm_state
*/
- uint8_t pad[2];
+ uint8_t phy_fsm_state;
+ /**
+ * Phy rate for DP - RBR/HBR/HBR2/HBR3.
+ * Set this using enum phy_link_rate.
+ * This does not support HDMI/DP2 for now.
+ */
+ uint8_t phy_rate;
};
/**
@@ -1772,16 +1759,9 @@ struct dmub_cmd_psr_force_static_data {
*/
uint8_t panel_inst;
/**
- * Phy state to enter.
- * Values to use are defined in dmub_phy_fsm_state
- */
- uint8_t phy_fsm_state;
- /**
- * Phy rate for DP - RBR/HBR/HBR2/HBR3.
- * Set this using enum phy_link_rate.
- * This does not support HDMI/DP2 for now.
+ * Explicit padding to 4 byte boundary.
*/
- uint8_t phy_rate;
+ uint8_t pad[2];
};
/**
@@ -3044,9 +3024,7 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
uint32_t wptr = rb->wrpt;
while (rptr != wptr) {
- uint64_t volatile *data = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rptr);
- //uint64_t volatile *p = (uint64_t volatile *)data;
- uint64_t temp;
+ uint64_t *data = (uint64_t *)((uint8_t *)(rb->base_address) + rptr);
uint8_t i;
/* Don't remove this.
@@ -3054,7 +3032,7 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
* for this function to be effective.
*/
for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
- temp = *data++;
+ (void)READ_ONCE(*data++);
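+ /* READ_ONCE() keeps the compiler from eliding the otherwise discarded
+ * loads, serving the same purpose as the volatile temporary it replaces.
+ */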
rptr += DMUB_RB_CMD_SIZE;
if (rptr >= rb->capacity)
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index fed1edc038d8..c6bbd262f1ac 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -162,6 +162,7 @@ enum connector_id {
CONNECTOR_ID_MXM = 21,
CONNECTOR_ID_WIRELESS = 22,
CONNECTOR_ID_MIRACAST = 23,
+ CONNECTOR_ID_USBC = 24,
CONNECTOR_ID_VIRTUAL = 100
};
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index bd1d1dc93629..03fa63d56fa6 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -46,6 +46,10 @@
/* Number of consecutive frames to check before entering/exiting fixed refresh */
#define FIXED_REFRESH_ENTER_FRAME_COUNT 5
#define FIXED_REFRESH_EXIT_FRAME_COUNT 10
+/* Flip interval workaround constants */
+#define VSYNCS_BETWEEN_FLIP_THRESHOLD 2
+#define FREESYNC_CONSEC_FLIP_AFTER_VSYNC 5
+#define FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US 500
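+/* The workaround below engages once the detect counter exceeds
+ * FREESYNC_CONSEC_FLIP_AFTER_VSYNC consecutive frames in which at least
+ * VSYNCS_BETWEEN_FLIP_THRESHOLD vsyncs pass between flips and the flip
+ * lands within FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US of the vsync.
+ */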
struct core_freesync {
struct mod_freesync public;
@@ -466,6 +470,41 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
}
}
+static void determine_flip_interval_workaround_req(struct mod_vrr_params *in_vrr,
+ unsigned int curr_time_stamp_in_us)
+{
+ in_vrr->flip_interval.vsync_to_flip_in_us = curr_time_stamp_in_us -
+ in_vrr->flip_interval.v_update_timestamp_in_us;
+
+ /* Determine conditions for stopping workaround */
+ if (in_vrr->flip_interval.flip_interval_workaround_active &&
+ in_vrr->flip_interval.vsyncs_between_flip < VSYNCS_BETWEEN_FLIP_THRESHOLD &&
+ in_vrr->flip_interval.vsync_to_flip_in_us > FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US) {
+ in_vrr->flip_interval.flip_interval_detect_counter = 0;
+ in_vrr->flip_interval.program_flip_interval_workaround = true;
+ in_vrr->flip_interval.flip_interval_workaround_active = false;
+ } else {
+ /* Determine conditions for starting workaround */
+ if (in_vrr->flip_interval.vsyncs_between_flip >= VSYNCS_BETWEEN_FLIP_THRESHOLD &&
+ in_vrr->flip_interval.vsync_to_flip_in_us < FREESYNC_VSYNC_TO_FLIP_DELTA_IN_US) {
+ /* Increase the flip interval counter when at least 2 vsyncs pass
+ * between flips and the vsync-to-flip interval is less than 500us
+ */
+ in_vrr->flip_interval.flip_interval_detect_counter++;
+ if (in_vrr->flip_interval.flip_interval_detect_counter > FREESYNC_CONSEC_FLIP_AFTER_VSYNC) {
+ /* Start workaround if we detect 5 consecutive instances of the above case */
+ in_vrr->flip_interval.program_flip_interval_workaround = true;
+ in_vrr->flip_interval.flip_interval_workaround_active = true;
+ }
+ } else {
+ /* Reset the flip interval counter if the condition is no longer met */
+ in_vrr->flip_interval.flip_interval_detect_counter = 0;
+ }
+ }
+
+ in_vrr->flip_interval.vsyncs_between_flip = 0;
+}
+
static bool vrr_settings_require_update(struct core_freesync *core_freesync,
struct mod_freesync_config *in_config,
unsigned int min_refresh_in_uhz,
@@ -1179,6 +1218,9 @@ void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync,
in_out_vrr);
}
+ determine_flip_interval_workaround_req(in_out_vrr,
+ curr_time_stamp_in_us);
+
}
}
@@ -1187,6 +1229,8 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
struct mod_vrr_params *in_out_vrr)
{
struct core_freesync *core_freesync = NULL;
+ unsigned int cur_timestamp_in_us;
+ unsigned long long cur_tick;
if ((mod_freesync == NULL) || (stream == NULL) || (in_out_vrr == NULL))
return;
@@ -1196,6 +1240,36 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
if (in_out_vrr->supported == false)
return;
+ cur_tick = dm_get_timestamp(core_freesync->dc->ctx);
+ cur_timestamp_in_us = (unsigned int)
+ div_u64(dm_get_elapse_time_in_ns(core_freesync->dc->ctx, cur_tick, 0), 1000);
+
+ in_out_vrr->flip_interval.vsyncs_between_flip++;
+ in_out_vrr->flip_interval.v_update_timestamp_in_us = cur_timestamp_in_us;
+
+ if (in_out_vrr->state == VRR_STATE_ACTIVE_VARIABLE &&
+ (in_out_vrr->flip_interval.flip_interval_workaround_active ||
+ (!in_out_vrr->flip_interval.flip_interval_workaround_active &&
+ in_out_vrr->flip_interval.program_flip_interval_workaround))) {
+ // set freesync vmin vmax to nominal for workaround
+ in_out_vrr->adjust.v_total_min =
+ mod_freesync_calc_v_total_from_refresh(
+ stream, in_out_vrr->max_refresh_in_uhz);
+ in_out_vrr->adjust.v_total_max =
+ in_out_vrr->adjust.v_total_min;
+ in_out_vrr->flip_interval.program_flip_interval_workaround = false;
+ in_out_vrr->flip_interval.do_flip_interval_workaround_cleanup = true;
+ return;
+ }
+
+ if (in_out_vrr->state != VRR_STATE_ACTIVE_VARIABLE &&
+ in_out_vrr->flip_interval.do_flip_interval_workaround_cleanup) {
+ in_out_vrr->flip_interval.do_flip_interval_workaround_cleanup = false;
+ in_out_vrr->flip_interval.flip_interval_detect_counter = 0;
+ in_out_vrr->flip_interval.vsyncs_between_flip = 0;
+ in_out_vrr->flip_interval.vsync_to_flip_in_us = 0;
+ }
+
/* Below the Range Logic */
/* Only execute if in fullscreen mode */
@@ -1302,7 +1376,7 @@ unsigned long long mod_freesync_calc_field_rate_from_timing(
bool mod_freesync_is_valid_range(uint32_t min_refresh_cap_in_uhz,
uint32_t max_refresh_cap_in_uhz,
- uint32_t nominal_field_rate_in_uhz)
+ uint32_t nominal_field_rate_in_uhz)
{
/* Typically nominal refresh calculated can have some fractional part.
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index 3e81850a7ffe..5e01c6e24cbc 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -251,6 +251,33 @@ out:
return status;
}
+static enum mod_hdcp_status update_display_adjustments(struct mod_hdcp *hdcp,
+ struct mod_hdcp_display *display,
+ struct mod_hdcp_display_adjustment *adj)
+{
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_NOT_IMPLEMENTED;
+
+ if (is_in_authenticated_states(hdcp) &&
+ is_dp_mst_hdcp(hdcp) &&
+ display->adjust.disable == true &&
+ adj->disable == false) {
+ display->adjust.disable = false;
+ if (is_hdcp1(hdcp))
+ status = mod_hdcp_hdcp1_enable_dp_stream_encryption(hdcp);
+ else if (is_hdcp2(hdcp))
+ status = mod_hdcp_hdcp2_enable_dp_stream_encryption(hdcp);
+
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ display->adjust.disable = true;
+ }
+
+ if (status == MOD_HDCP_STATUS_SUCCESS &&
+ memcmp(adj, &display->adjust,
+ sizeof(struct mod_hdcp_display_adjustment)) != 0)
+ status = MOD_HDCP_STATUS_NOT_IMPLEMENTED;
+
+ return status;
+}
/*
* Implementation of functions in mod_hdcp.h
*/
@@ -391,7 +418,7 @@ out:
return status;
}
-enum mod_hdcp_status mod_hdcp_update_authentication(struct mod_hdcp *hdcp,
+enum mod_hdcp_status mod_hdcp_update_display(struct mod_hdcp *hdcp,
uint8_t index,
struct mod_hdcp_link_adjustment *link_adjust,
struct mod_hdcp_display_adjustment *display_adjust,
@@ -419,6 +446,15 @@ enum mod_hdcp_status mod_hdcp_update_authentication(struct mod_hdcp *hdcp,
goto out;
}
+ if (memcmp(link_adjust, &hdcp->connection.link.adjust,
+ sizeof(struct mod_hdcp_link_adjustment)) == 0 &&
+ memcmp(display_adjust, &display->adjust,
+ sizeof(struct mod_hdcp_display_adjustment)) != 0) {
+ status = update_display_adjustments(hdcp, display, display_adjust);
+ if (status != MOD_HDCP_STATUS_NOT_IMPLEMENTED)
+ goto out;
+ }
+
/* stop current authentication */
status = reset_authentication(hdcp, output);
if (status != MOD_HDCP_STATUS_SUCCESS)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 392c0c03365a..55c7d873175f 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -445,6 +445,14 @@ static inline uint8_t is_in_hdcp2_dp_states(struct mod_hdcp *hdcp)
current_state(hdcp) <= HDCP2_DP_STATE_END);
}
+static inline uint8_t is_in_authenticated_states(struct mod_hdcp *hdcp)
+{
+ return (current_state(hdcp) == D1_A4_AUTHENTICATED ||
+ current_state(hdcp) == H1_A45_AUTHENTICATED ||
+ current_state(hdcp) == D2_A5_AUTHENTICATED ||
+ current_state(hdcp) == H2_A5_AUTHENTICATED);
+}
+
static inline uint8_t is_hdcp1(struct mod_hdcp *hdcp)
{
return (is_in_hdcp1_states(hdcp) || is_in_hdcp1_dp_states(hdcp));
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 6ec918af3bff..1ddb4f5eac8e 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -497,9 +497,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp,
return status;
}
-extern enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp,
- struct mod_hdcp_event_context *event_ctx,
- struct mod_hdcp_transition_input_hdcp1 *input)
+enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp,
+ struct mod_hdcp_event_context *event_ctx,
+ struct mod_hdcp_transition_input_hdcp1 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index 75a158a2514c..cf6bc9446244 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -105,6 +105,16 @@ struct mod_vrr_params_fixed_refresh {
uint32_t frame_counter;
};
+struct mod_vrr_params_flip_interval {
+ bool flip_interval_workaround_active;
+ bool program_flip_interval_workaround;
+ bool do_flip_interval_workaround_cleanup;
+ uint32_t flip_interval_detect_counter;
+ uint32_t vsyncs_between_flip;
+ uint32_t vsync_to_flip_in_us;
+ uint32_t v_update_timestamp_in_us;
+};
+
struct mod_vrr_params {
bool supported;
bool send_info_frame;
@@ -121,6 +131,8 @@ struct mod_vrr_params {
struct mod_vrr_params_fixed_refresh fixed;
struct mod_vrr_params_btr btr;
+
+ struct mod_vrr_params_flip_interval flip_interval;
};
struct mod_freesync *mod_freesync_create(struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index f7420c3f5672..3348bb97ef81 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -294,7 +294,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
uint8_t index, struct mod_hdcp_output *output);
/* called per display to apply new authentication adjustment */
-enum mod_hdcp_status mod_hdcp_update_authentication(struct mod_hdcp *hdcp,
+enum mod_hdcp_status mod_hdcp_update_display(struct mod_hdcp *hdcp,
uint8_t index,
struct mod_hdcp_link_adjustment *link_adjust,
struct mod_hdcp_display_adjustment *display_adjust,
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 2b00f334e93d..97928d4c3b9a 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -784,3 +784,41 @@ bool dmcu_load_iram(struct dmcu *dmcu,
return result;
}
+/*
+ * is_psr_su_specific_panel() - check whether the sink is an eDP device
+ * with AMD vendor-specific PSR-SU support.
+ *
+ * @link: dc link pointer
+ *
+ * Return: true for an AMD vendor-specific PSR-SU eDP panel
+ */
+bool is_psr_su_specific_panel(struct dc_link *link)
+{
+ if (link->dpcd_caps.edp_rev >= DP_EDP_14) {
+ if (link->dpcd_caps.psr_info.psr_version >= DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
+ return true;
+ /*
+ * Some panels will report PSR capabilities over additional DPCD bits.
+ * Such panels are approved despite reporting only PSR v3, as long as
+ * the additional bits are reported.
+ */
+ if (link->dpcd_caps.psr_info.psr_version < DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)
+ return false;
+
+ if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8) {
+ /*
+ * FIXME:
+ * This is a temporary workaround to disable PSR-SU when the system has
+ * enabled DSC on this specific sink. Once PSR-SU + DSC is fixed, this
+ * condition should be removed.
+ */
+ if (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)
+ return false;
+
+ if (link->dpcd_caps.psr_info.force_psrsu_cap == 0x1)
+ return true;
+ }
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index 2a9f8e2d8080..1a634d8c78c5 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -52,4 +52,5 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
struct dmcu_iram_parameters params,
unsigned int inst);
+bool is_psr_su_specific_panel(struct dc_link *link);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index fe4e585781bb..741dae17562a 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -116,38 +116,38 @@ enum amd_powergating_state {
/* CG flags */
-#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
-#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
-#define AMD_CG_SUPPORT_GFX_CGCG (1 << 2)
-#define AMD_CG_SUPPORT_GFX_CGLS (1 << 3)
-#define AMD_CG_SUPPORT_GFX_CGTS (1 << 4)
-#define AMD_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
-#define AMD_CG_SUPPORT_GFX_CP_LS (1 << 6)
-#define AMD_CG_SUPPORT_GFX_RLC_LS (1 << 7)
-#define AMD_CG_SUPPORT_MC_LS (1 << 8)
-#define AMD_CG_SUPPORT_MC_MGCG (1 << 9)
-#define AMD_CG_SUPPORT_SDMA_LS (1 << 10)
-#define AMD_CG_SUPPORT_SDMA_MGCG (1 << 11)
-#define AMD_CG_SUPPORT_BIF_LS (1 << 12)
-#define AMD_CG_SUPPORT_UVD_MGCG (1 << 13)
-#define AMD_CG_SUPPORT_VCE_MGCG (1 << 14)
-#define AMD_CG_SUPPORT_HDP_LS (1 << 15)
-#define AMD_CG_SUPPORT_HDP_MGCG (1 << 16)
-#define AMD_CG_SUPPORT_ROM_MGCG (1 << 17)
-#define AMD_CG_SUPPORT_DRM_LS (1 << 18)
-#define AMD_CG_SUPPORT_BIF_MGCG (1 << 19)
-#define AMD_CG_SUPPORT_GFX_3D_CGCG (1 << 20)
-#define AMD_CG_SUPPORT_GFX_3D_CGLS (1 << 21)
-#define AMD_CG_SUPPORT_DRM_MGCG (1 << 22)
-#define AMD_CG_SUPPORT_DF_MGCG (1 << 23)
-#define AMD_CG_SUPPORT_VCN_MGCG (1 << 24)
-#define AMD_CG_SUPPORT_HDP_DS (1 << 25)
-#define AMD_CG_SUPPORT_HDP_SD (1 << 26)
-#define AMD_CG_SUPPORT_IH_CG (1 << 27)
-#define AMD_CG_SUPPORT_ATHUB_LS (1 << 28)
-#define AMD_CG_SUPPORT_ATHUB_MGCG (1 << 29)
-#define AMD_CG_SUPPORT_JPEG_MGCG (1 << 30)
-#define AMD_CG_SUPPORT_GFX_FGCG (1 << 31)
+#define AMD_CG_SUPPORT_GFX_MGCG (1ULL << 0)
+#define AMD_CG_SUPPORT_GFX_MGLS (1ULL << 1)
+#define AMD_CG_SUPPORT_GFX_CGCG (1ULL << 2)
+#define AMD_CG_SUPPORT_GFX_CGLS (1ULL << 3)
+#define AMD_CG_SUPPORT_GFX_CGTS (1ULL << 4)
+#define AMD_CG_SUPPORT_GFX_CGTS_LS (1ULL << 5)
+#define AMD_CG_SUPPORT_GFX_CP_LS (1ULL << 6)
+#define AMD_CG_SUPPORT_GFX_RLC_LS (1ULL << 7)
+#define AMD_CG_SUPPORT_MC_LS (1ULL << 8)
+#define AMD_CG_SUPPORT_MC_MGCG (1ULL << 9)
+#define AMD_CG_SUPPORT_SDMA_LS (1ULL << 10)
+#define AMD_CG_SUPPORT_SDMA_MGCG (1ULL << 11)
+#define AMD_CG_SUPPORT_BIF_LS (1ULL << 12)
+#define AMD_CG_SUPPORT_UVD_MGCG (1ULL << 13)
+#define AMD_CG_SUPPORT_VCE_MGCG (1ULL << 14)
+#define AMD_CG_SUPPORT_HDP_LS (1ULL << 15)
+#define AMD_CG_SUPPORT_HDP_MGCG (1ULL << 16)
+#define AMD_CG_SUPPORT_ROM_MGCG (1ULL << 17)
+#define AMD_CG_SUPPORT_DRM_LS (1ULL << 18)
+#define AMD_CG_SUPPORT_BIF_MGCG (1ULL << 19)
+#define AMD_CG_SUPPORT_GFX_3D_CGCG (1ULL << 20)
+#define AMD_CG_SUPPORT_GFX_3D_CGLS (1ULL << 21)
+#define AMD_CG_SUPPORT_DRM_MGCG (1ULL << 22)
+#define AMD_CG_SUPPORT_DF_MGCG (1ULL << 23)
+#define AMD_CG_SUPPORT_VCN_MGCG (1ULL << 24)
+#define AMD_CG_SUPPORT_HDP_DS (1ULL << 25)
+#define AMD_CG_SUPPORT_HDP_SD (1ULL << 26)
+#define AMD_CG_SUPPORT_IH_CG (1ULL << 27)
+#define AMD_CG_SUPPORT_ATHUB_LS (1ULL << 28)
+#define AMD_CG_SUPPORT_ATHUB_MGCG (1ULL << 29)
+#define AMD_CG_SUPPORT_JPEG_MGCG (1ULL << 30)
+#define AMD_CG_SUPPORT_GFX_FGCG (1ULL << 31)
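+/* The CG flags above now use 1ULL shifts so the set can grow past bit 31;
+ * the get_clockgating_state() callback below widens to u64 to match.
+ */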
/* PG flags */
#define AMD_PG_SUPPORT_GFX_PG (1 << 0)
#define AMD_PG_SUPPORT_GFX_SMG (1 << 1)
@@ -298,7 +298,7 @@ struct amd_ip_funcs {
enum amd_clockgating_state state);
int (*set_powergating_state)(void *handle,
enum amd_powergating_state state);
- void (*get_clockgating_state)(void *handle, u32 *flags);
+ void (*get_clockgating_state)(void *handle, u64 *flags);
};
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h
index 90350f46a0c4..363d2139cea2 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h
@@ -988,4 +988,17 @@
#define mmMDM_WIG_PIPE_BUSY_BASE_IDX 1
+/* VCN 2_6_0 regs */
+#define mmUVD_RAS_VCPU_VCODEC_STATUS 0x0057
+#define mmUVD_RAS_VCPU_VCODEC_STATUS_BASE_IDX 1
+#define mmUVD_RAS_MMSCH_FATAL_ERROR 0x0058
+#define mmUVD_RAS_MMSCH_FATAL_ERROR_BASE_IDX 1
+
+
+/* JPEG 2_6_0 regs */
+#define mmUVD_RAS_JPEG0_STATUS 0x0059
+#define mmUVD_RAS_JPEG0_STATUS_BASE_IDX 1
+#define mmUVD_RAS_JPEG1_STATUS 0x005a
+#define mmUVD_RAS_JPEG1_STATUS_BASE_IDX 1
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_sh_mask.h
index c41c59c30006..8de883b76d90 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_sh_mask.h
@@ -3606,4 +3606,28 @@
#define UVD_LMI_CRC3__CRC32_MASK 0xFFFFFFFFL
+/* VCN 2_6_0 UVD_RAS_VCPU_VCODEC_STATUS */
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_VCPU_VCODEC_STATUS__POISONED_PF_MASK 0x80000000L
+
+/* VCN 2_6_0 UVD_RAS_MMSCH_FATAL_ERROR */
+#define UVD_RAS_MMSCH_FATAL_ERROR__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_MMSCH_FATAL_ERROR__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_MMSCH_FATAL_ERROR__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_MMSCH_FATAL_ERROR__POISONED_PF_MASK 0x80000000L
+
+/* JPEG 2_6_0 UVD_RAS_JPEG0_STATUS */
+#define UVD_RAS_JPEG0_STATUS__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_JPEG0_STATUS__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_JPEG0_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_JPEG0_STATUS__POISONED_PF_MASK 0x80000000L
+
+/* JPEG 2_6_0 UVD_RAS_JPEG1_STATUS */
+#define UVD_RAS_JPEG1_STATUS__POISONED_VF__SHIFT 0x0
+#define UVD_RAS_JPEG1_STATUS__POISONED_PF__SHIFT 0x1f
+#define UVD_RAS_JPEG1_STATUS__POISONED_VF_MASK 0x7FFFFFFFL
+#define UVD_RAS_JPEG1_STATUS__POISONED_PF_MASK 0x80000000L
+
#endif
diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
index a486769b66c6..b25026c3ec96 100644
--- a/drivers/gpu/drm/amd/include/discovery.h
+++ b/drivers/gpu/drm/amd/include/discovery.h
@@ -93,7 +93,7 @@ typedef struct ip
uint8_t harvest : 4; /* Harvest */
uint8_t reserved : 4; /* Placeholder field */
#endif
- uint32_t base_address[1]; /* variable number of Addresses */
+ uint32_t base_address[]; /* variable number of Addresses */
} ip;
typedef struct die_header
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 5504d81c77b7..5472f9936feb 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -427,6 +427,7 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int i;
if (!adev->pm.dpm_enabled)
return;
@@ -434,6 +435,15 @@ void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
if (!pp_funcs->pm_compute_clocks)
return;
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_bandwidth_update(adev);
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->sched.ready)
+ amdgpu_fence_wait_empty(ring);
+ }
+
mutex_lock(&adev->pm.mutex);
pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
mutex_unlock(&adev->pm.mutex);
@@ -443,6 +453,20 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
+ if (adev->family == AMDGPU_FAMILY_SI) {
+ mutex_lock(&adev->pm.mutex);
+ if (enable) {
+ adev->pm.dpm.uvd_active = true;
+ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
+ } else {
+ adev->pm.dpm.uvd_active = false;
+ }
+ mutex_unlock(&adev->pm.mutex);
+
+ amdgpu_dpm_compute_clocks(adev);
+ return;
+ }
+
ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
if (ret)
DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
@@ -453,6 +477,21 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
+ if (adev->family == AMDGPU_FAMILY_SI) {
+ mutex_lock(&adev->pm.mutex);
+ if (enable) {
+ adev->pm.dpm.vce_active = true;
+ /* XXX select vce level based on ring/task */
+ adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
+ } else {
+ adev->pm.dpm.vce_active = false;
+ }
+ mutex_unlock(&adev->pm.mutex);
+
+ amdgpu_dpm_compute_clocks(adev);
+ return;
+ }
+
ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
if (ret)
DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
@@ -751,7 +790,7 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
- if (!pp_funcs->force_performance_level)
+ if (!pp_funcs || !pp_funcs->force_performance_level)
return 0;
if (adev->pm.dpm.thermal_active)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 5cd67ddf8495..78ec9b71197d 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1954,8 +1954,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
uint32_t mask, enum amdgpu_device_attr_states *states)
{
struct device_attribute *dev_attr = &attr->dev_attr;
+ uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0];
+ uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
const char *attr_name = dev_attr->attr.name;
- enum amd_asic_type asic_type = adev->asic_type;
if (!(attr->flags & mask)) {
*states = ATTR_STATE_UNSUPPORTED;
@@ -1965,53 +1966,63 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))
if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
- if (asic_type < CHIP_VEGA10)
+ if (gc_ver < IP_VERSION(9, 0, 0))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
- if (asic_type < CHIP_VEGA10 ||
- asic_type == CHIP_ARCTURUS ||
- asic_type == CHIP_ALDEBARAN)
+ if (gc_ver < IP_VERSION(9, 0, 0) ||
+ gc_ver == IP_VERSION(9, 4, 1) ||
+ gc_ver == IP_VERSION(9, 4, 2))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
- if (asic_type < CHIP_VEGA20)
+ if (mp1_ver < IP_VERSION(10, 0, 0))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
*states = ATTR_STATE_UNSUPPORTED;
if (amdgpu_dpm_is_overdrive_supported(adev))
*states = ATTR_STATE_SUPPORTED;
} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
- if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
+ if (adev->flags & AMD_IS_APU || gc_ver == IP_VERSION(9, 0, 1))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pcie_bw)) {
/* PCIe Perf counters won't work on APU nodes */
if (adev->flags & AMD_IS_APU)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(unique_id)) {
- if (asic_type != CHIP_VEGA10 &&
- asic_type != CHIP_VEGA20 &&
- asic_type != CHIP_ARCTURUS &&
- asic_type != CHIP_ALDEBARAN)
+ switch (gc_ver) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ case IP_VERSION(10, 3, 0):
+ *states = ATTR_STATE_SUPPORTED;
+ break;
+ default:
*states = ATTR_STATE_UNSUPPORTED;
+ }
} else if (DEVICE_ATTR_IS(pp_features)) {
- if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
+ if (adev->flags & AMD_IS_APU || gc_ver < IP_VERSION(9, 0, 0))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(gpu_metrics)) {
- if (asic_type < CHIP_VEGA12)
+ if (gc_ver < IP_VERSION(9, 1, 0))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
- if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
+ if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(10, 1, 2)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
- if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
+ if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+ gc_ver == IP_VERSION(10, 3, 0) ||
+ gc_ver == IP_VERSION(10, 1, 2)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
}
- switch (asic_type) {
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
+ switch (gc_ver) {
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
/* the Mi series card does not support standalone mclk/socclk/fclk level setting */
if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
DEVICE_ATTR_IS(pp_dpm_socclk) ||
@@ -2026,7 +2037,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
/* SMU MP1 does not support dcefclk level setting */
- if (asic_type >= CHIP_NAVI10) {
+ if (gc_ver >= IP_VERSION(10, 0, 0)) {
dev_attr->attr.mode &= ~S_IWUGO;
dev_attr->store = NULL;
}
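The gc_ver/mp1_ver comparisons in the hunks above rely on IP_VERSION() producing a single integer that orders correctly across major, minor, and revision. A minimal sketch, assuming amdgpu's (major << 16 | minor << 8 | revision) packing:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed packing, matching amdgpu's IP_VERSION() macro. */
    #define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

    int main(void)
    {
            uint32_t gc_ver = IP_VERSION(9, 4, 2);

            /* Ordered comparisons work because major dominates minor, etc. */
            printf("9.4.2 >= 9.0.0:  %d\n", gc_ver >= IP_VERSION(9, 0, 0));
            printf("9.4.2 <  10.3.1: %d\n", gc_ver < IP_VERSION(10, 3, 1));
            return 0;
    }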
@@ -2864,8 +2875,9 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
+ uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
- if (adev->asic_type == CHIP_VANGOGH)
+ if (gc_ver == IP_VERSION(10, 3, 1))
return sysfs_emit(buf, "%s\n",
to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
"fastPPT" : "slowPPT");
@@ -3177,6 +3189,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode;
+ uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
/* under multi-vf mode, the hwmon attributes are all not supported */
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
@@ -3245,18 +3258,18 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
effective_mode &= ~S_IWUSR;
+	/* not implemented yet for APUs other than GC 10.3.1 (vangogh) */
if (((adev->family == AMDGPU_FAMILY_SI) ||
- ((adev->flags & AMD_IS_APU) &&
- (adev->asic_type != CHIP_VANGOGH))) && /* not implemented yet */
+ ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)))) &&
(attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
+ attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
return 0;
+	/* not implemented yet for APUs having < GC 9.3.0 */
if (((adev->family == AMDGPU_FAMILY_SI) ||
- ((adev->flags & AMD_IS_APU) &&
- (adev->asic_type < CHIP_RENOIR))) && /* not implemented yet */
+ ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
(attr == &sensor_dev_attr_power1_average.dev_attr.attr))
return 0;
@@ -3294,8 +3307,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
/* only SOC15 dGPUs support hotspot and mem temperatures */
- if (((adev->flags & AMD_IS_APU) ||
- adev->asic_type < CHIP_VEGA10) &&
+ if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
(attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
@@ -3310,13 +3322,13 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
/* only Vangogh has fast PPT limit and power labels */
- if (!(adev->asic_type == CHIP_VANGOGH) &&
+ if (!(gc_ver == IP_VERSION(10, 3, 1)) &&
(attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
- attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
- attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
- attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
- attr == &sensor_dev_attr_power2_label.dev_attr.attr))
+ attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
+ attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
+ attr == &sensor_dev_attr_power2_label.dev_attr.attr))
return 0;
return effective_mode;
@@ -3421,6 +3433,8 @@ static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
+ uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0];
+ uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
uint32_t value;
uint64_t value64 = 0;
uint32_t query = 0;
@@ -3467,7 +3481,8 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
- if (adev->asic_type > CHIP_VEGA20) {
+	/* ASICs greater than CHIP_VEGA20 support these sensors */
+ if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
/* VCN clocks */
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
if (!value) {
@@ -3511,7 +3526,7 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
return 0;
}
-static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
+static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
{
int i;
@@ -3524,7 +3539,7 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
struct drm_device *dev = adev_to_drm(adev);
- u32 flags = 0;
+ u64 flags = 0;
int r;
if (amdgpu_in_reset(adev))
@@ -3546,7 +3561,7 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
amdgpu_device_ip_get_clockgating_state(adev, &flags);
- seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
+ seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
amdgpu_parse_cg_state(m, flags);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h
index a920515e2274..52045ad59bed 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h
@@ -26,7 +26,7 @@
struct cg_flag_name
{
- u32 flag;
+ u64 flag;
const char *name;
};
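With the flag field widened to u64, a decoder can walk a table of cg_flag_name entries and test each bit against the mask; bits at position 32 and above would silently vanish with a 32-bit type. A runnable sketch with a made-up two-entry table (the kernel's list in amdgpu_pm.c is much longer):

    #include <stdio.h>

    struct cg_flag_name {
            unsigned long long flag;
            const char *name;
    };

    /* Hypothetical two-entry table for illustration only. */
    static const struct cg_flag_name clocks[] = {
            { 1ULL << 0,  "Graphics Medium Grain Clock Gating" },
            { 1ULL << 33, "Hypothetical feature bit above position 31" },
            { 0, NULL },
    };

    static void parse_cg_state(unsigned long long flags)
    {
            for (int i = 0; clocks[i].name; i++)
                    if (flags & clocks[i].flag)
                            printf("\t%s\n", clocks[i].name);
    }

    int main(void)
    {
            /* The high bit would be lost with a u32 flags argument. */
            parse_cg_state((1ULL << 33) | 1ULL);
            return 0;
    }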
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index 9613c6181c17..d3fe149d8476 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -1028,16 +1028,6 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_legacy_dpm_compute_clocks(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int i = 0;
-
- if (adev->mode_info.num_crtc)
- amdgpu_display_bandwidth_update(adev);
-
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->sched.ready)
- amdgpu_fence_wait_empty(ring);
- }
amdgpu_dpm_get_active_displays(adev);
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index caae54487f9c..49c398ec0aaf 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -3892,40 +3892,6 @@ static int si_set_boot_state(struct amdgpu_device *adev)
}
#endif
-static int si_set_powergating_by_smu(void *handle,
- uint32_t block_type,
- bool gate)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- switch (block_type) {
- case AMD_IP_BLOCK_TYPE_UVD:
- if (!gate) {
- adev->pm.dpm.uvd_active = true;
- adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
- } else {
- adev->pm.dpm.uvd_active = false;
- }
-
- amdgpu_legacy_dpm_compute_clocks(handle);
- break;
- case AMD_IP_BLOCK_TYPE_VCE:
- if (!gate) {
- adev->pm.dpm.vce_active = true;
- /* XXX select vce level based on ring/task */
- adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
- } else {
- adev->pm.dpm.vce_active = false;
- }
-
- amdgpu_legacy_dpm_compute_clocks(handle);
- break;
- default:
- break;
- }
- return 0;
-}
-
static int si_set_sw_state(struct amdgpu_device *adev)
{
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
@@ -7331,17 +7297,15 @@ static int si_parse_power_table(struct amdgpu_device *adev)
if (!adev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
- for (i = 0; i < state_array->ucNumEntries; i++) {
+ for (adev->pm.dpm.num_ps = 0, i = 0; i < state_array->ucNumEntries; i++) {
u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL);
- if (ps == NULL) {
- kfree(adev->pm.dpm.ps);
+ if (ps == NULL)
return -ENOMEM;
- }
adev->pm.dpm.ps[i].ps_priv = ps;
si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
non_clock_info,
@@ -7363,8 +7327,8 @@ static int si_parse_power_table(struct amdgpu_device *adev)
k++;
}
power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ adev->pm.dpm.num_ps++;
}
- adev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
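Counting num_ps as each entry is successfully parsed keeps it consistent with the populated array even when the loop bails out early on -ENOMEM, so a common error path can free exactly the entries that were allocated. A runnable sketch of the idiom (generic, not the pplib parser itself):

    #include <stdlib.h>

    struct ps { void *priv; };

    /* On failure, *num_ps counts only the slots actually initialized. */
    static int parse_table(struct ps *ps, int entries, int *num_ps)
    {
            for (*num_ps = 0; *num_ps < entries; (*num_ps)++) {
                    ps[*num_ps].priv = malloc(16);
                    if (!ps[*num_ps].priv)
                            return -1;
            }
            return 0;
    }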
@@ -8125,7 +8089,6 @@ static const struct amd_pm_funcs si_dpm_funcs = {
.print_power_state = &si_dpm_print_power_state,
.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
.force_performance_level = &si_dpm_force_performance_level,
- .set_powergating_by_smu = &si_set_powergating_by_smu,
.vblank_too_short = &si_dpm_vblank_too_short,
.set_fan_control_mode = &si_dpm_set_fan_control_mode,
.get_fan_control_mode = &si_dpm_get_fan_control_mode,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index a2da46bf3985..1eb4e613b27a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -671,6 +671,22 @@ static int pp_dpm_force_clock_level(void *handle,
return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
}
+static int pp_dpm_emit_clock_levels(void *handle,
+ enum pp_clock_type type,
+ char *buf,
+ int *offset)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EOPNOTSUPP;
+
+ if (!hwmgr->hwmgr_func->emit_clock_levels)
+ return -ENOENT;
+
+ return hwmgr->hwmgr_func->emit_clock_levels(hwmgr, type, buf, offset);
+}
+
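The distinct error codes above give callers a clean fallback path: -EOPNOTSUPP means DPM is disabled altogether, while -ENOENT means only that this backend lacks the emit variant. A hedged sketch of how a sysfs show routine might try the new interface first; the helper name is hypothetical, and the real dispatch goes through the amd_pm_funcs table rather than direct calls:

    static ssize_t show_clock_levels(void *handle, enum pp_clock_type type,
                                     char *buf)
    {
            int offset = 0;
            int ret;

            /* Prefer the offset-tracking emit interface when implemented. */
            ret = pp_dpm_emit_clock_levels(handle, type, buf, &offset);
            if (ret == -ENOENT)      /* emit variant not implemented */
                    return pp_dpm_print_clock_levels(handle, type, buf);
            if (ret)                 /* e.g. -EOPNOTSUPP: DPM disabled */
                    return ret;

            return offset;           /* bytes written via sysfs_emit_at() */
    }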
static int pp_dpm_print_clock_levels(void *handle,
enum pp_clock_type type, char *buf)
{
@@ -1487,16 +1503,6 @@ static void pp_pm_compute_clocks(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
struct amdgpu_device *adev = hwmgr->adev;
- int i = 0;
-
- if (adev->mode_info.num_crtc)
- amdgpu_display_bandwidth_update(adev);
-
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->sched.ready)
- amdgpu_fence_wait_empty(ring);
- }
if (!amdgpu_device_has_dc_support(adev)) {
amdgpu_dpm_get_active_displays(adev);
@@ -1535,6 +1541,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.get_pp_table = pp_dpm_get_pp_table,
.set_pp_table = pp_dpm_set_pp_table,
.force_clock_level = pp_dpm_force_clock_level,
+ .emit_clock_levels = pp_dpm_emit_clock_levels,
.print_clock_levels = pp_dpm_print_clock_levels,
.get_sclk_od = pp_dpm_get_sclk_od,
.set_sclk_od = pp_dpm_set_sclk_od,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 37324f2009ca..99bfe5efe171 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -4625,6 +4625,152 @@ static int vega10_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
}
+static int vega10_emit_clock_levels(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, char *buf, int *offset)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
+ struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
+ struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table);
+ struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table);
+ struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
+ uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
+ PPTable_t *pptable = &(data->smc_state_table.pp_table);
+
+ uint32_t i, now, count = 0;
+ int ret = 0;
+
+ switch (type) {
+ case PP_SCLK:
+ if (data->registry_data.sclk_dpm_key_disabled)
+ return -EOPNOTSUPP;
+
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (hwmgr->pp_one_vf &&
+ (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
+ count = 5;
+ else
+ count = sclk_table->count;
+ for (i = 0; i < count; i++)
+ *offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n",
+ i, sclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_MCLK:
+ if (data->registry_data.mclk_dpm_key_disabled)
+ return -EOPNOTSUPP;
+
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
+ if (unlikely(ret != 0))
+ return ret;
+
+ for (i = 0; i < mclk_table->count; i++)
+ *offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n",
+ i, mclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_SOCCLK:
+ if (data->registry_data.socclk_dpm_key_disabled)
+ return -EOPNOTSUPP;
+
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
+ if (unlikely(ret != 0))
+ return ret;
+
+ for (i = 0; i < soc_table->count; i++)
+ *offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n",
+ i, soc_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_DCEFCLK:
+ if (data->registry_data.dcefclk_dpm_key_disabled)
+ return -EOPNOTSUPP;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetClockFreqMHz,
+ CLK_DCEFCLK, &now);
+ if (unlikely(ret != 0))
+ return ret;
+
+ for (i = 0; i < dcef_table->count; i++)
+ *offset += sysfs_emit_at(buf, *offset, "%d: %uMhz %s\n",
+ i, dcef_table->dpm_levels[i].value / 100,
+ (dcef_table->dpm_levels[i].value / 100 == now) ?
+ "*" : "");
+ break;
+ case PP_PCIE:
+ current_gen_speed =
+ vega10_get_current_pcie_link_speed_level(hwmgr);
+ current_lane_width =
+ vega10_get_current_pcie_link_width_level(hwmgr);
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ gen_speed = pptable->PcieGenSpeed[i];
+ lane_width = pptable->PcieLaneCount[i];
+
+ *offset += sysfs_emit_at(buf, *offset, "%d: %s %s %s\n", i,
+ (gen_speed == 0) ? "2.5GT/s," :
+ (gen_speed == 1) ? "5.0GT/s," :
+ (gen_speed == 2) ? "8.0GT/s," :
+ (gen_speed == 3) ? "16.0GT/s," : "",
+ (lane_width == 1) ? "x1" :
+ (lane_width == 2) ? "x2" :
+ (lane_width == 3) ? "x4" :
+ (lane_width == 4) ? "x8" :
+ (lane_width == 5) ? "x12" :
+ (lane_width == 6) ? "x16" : "",
+ (current_gen_speed == gen_speed) &&
+ (current_lane_width == lane_width) ?
+ "*" : "");
+ }
+ break;
+
+ case OD_SCLK:
+ if (!hwmgr->od_enabled)
+ return -EOPNOTSUPP;
+
+ *offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_SCLK");
+ podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
+ for (i = 0; i < podn_vdd_dep->count; i++)
+ *offset += sysfs_emit_at(buf, *offset, "%d: %10uMhz %10umV\n",
+ i, podn_vdd_dep->entries[i].clk / 100,
+ podn_vdd_dep->entries[i].vddc);
+ break;
+ case OD_MCLK:
+ if (!hwmgr->od_enabled)
+ return -EOPNOTSUPP;
+
+ *offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_MCLK");
+ podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
+ for (i = 0; i < podn_vdd_dep->count; i++)
+ *offset += sysfs_emit_at(buf, *offset, "%d: %10uMhz %10umV\n",
+ i, podn_vdd_dep->entries[i].clk/100,
+ podn_vdd_dep->entries[i].vddc);
+ break;
+ case OD_RANGE:
+ if (!hwmgr->od_enabled)
+ return -EOPNOTSUPP;
+
+ *offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_RANGE");
+ *offset += sysfs_emit_at(buf, *offset, "SCLK: %7uMHz %10uMHz\n",
+ data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
+ *offset += sysfs_emit_at(buf, *offset, "MCLK: %7uMHz %10uMHz\n",
+ data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
+ *offset += sysfs_emit_at(buf, *offset, "VDDC: %7umV %11umV\n",
+ data->odn_dpm_table.min_vddc,
+ data->odn_dpm_table.max_vddc);
+ break;
+ default:
+ ret = -ENOENT;
+ break;
+ }
+ return ret;
+}
+
static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
@@ -5559,6 +5705,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
.display_clock_voltage_request = vega10_display_clock_voltage_request,
.force_clock_level = vega10_force_clock_level,
+ .emit_clock_levels = vega10_emit_clock_levels,
.print_clock_levels = vega10_print_clock_levels,
.display_config_changed = vega10_display_configuration_changed_task,
.powergate_uvd = vega10_power_gate_uvd,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 4f7f2f455301..27f8d0e0e6a8 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -313,6 +313,8 @@ struct pp_hwmgr_func {
int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
int (*power_off_asic)(struct pp_hwmgr *hwmgr);
int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
+ int (*emit_clock_levels)(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, char *buf, int *offset);
int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
int (*powergate_gfx)(struct pp_hwmgr *hwmgr, bool enable);
int (*get_sclk_od)(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index ef57b6089c69..46e34ed8a3c8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1333,6 +1333,8 @@ typedef enum {
METRICS_VOLTAGE_VDDGFX,
METRICS_SS_APU_SHARE,
METRICS_SS_DGPU_SHARE,
+ METRICS_UNIQUE_ID_UPPER32,
+ METRICS_UNIQUE_ID_LOWER32,
} MetricsMember_t;
enum smu_cmn2asic_mapping_type {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
index 3e4a314ef925..08f0bb2af5d2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
@@ -1419,8 +1419,8 @@ typedef struct {
uint8_t PcieRate ;
uint8_t PcieWidth ;
uint16_t AverageGfxclkFrequencyTarget;
- uint16_t Padding16_2;
+ uint16_t Padding16_2;
} SmuMetrics_t;
typedef struct {
@@ -1476,8 +1476,8 @@ typedef struct {
uint8_t PcieRate ;
uint8_t PcieWidth ;
uint16_t AverageGfxclkFrequencyTarget;
- uint16_t Padding16_2;
+ uint16_t Padding16_2;
} SmuMetrics_V2_t;
typedef struct {
@@ -1535,6 +1535,9 @@ typedef struct {
uint8_t PcieWidth;
uint16_t AverageGfxclkFrequencyTarget;
+ uint32_t PublicSerialNumLower32;
+ uint32_t PublicSerialNumUpper32;
+
} SmuMetrics_V3_t;
typedef struct {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 38f04836c82f..ab3e9d8b831e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -715,6 +715,14 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
*value = use_metrics_v3 ? metrics_v3->CurrFanSpeed :
use_metrics_v2 ? metrics_v2->CurrFanSpeed : metrics->CurrFanSpeed;
break;
+ case METRICS_UNIQUE_ID_UPPER32:
+ /* Only supported in 0x3A5300+, metrics_v3 requires 0x3A4900+ */
+ *value = use_metrics_v3 ? metrics_v3->PublicSerialNumUpper32 : 0;
+ break;
+ case METRICS_UNIQUE_ID_LOWER32:
+ /* Only supported in 0x3A5300+, metrics_v3 requires 0x3A4900+ */
+ *value = use_metrics_v3 ? metrics_v3->PublicSerialNumLower32 : 0;
+ break;
default:
*value = UINT_MAX;
break;
@@ -1773,6 +1781,28 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
return ret;
}
+static void sienna_cichlid_get_unique_id(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t upper32 = 0, lower32 = 0;
+
+ /* Only supported as of version 0.58.83.0 and only on Sienna Cichlid */
+ if (smu->smc_fw_version < 0x3A5300 ||
+ smu->adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7))
+ return;
+
+ if (sienna_cichlid_get_smu_metrics_data(smu, METRICS_UNIQUE_ID_UPPER32, &upper32))
+ goto out;
+ if (sienna_cichlid_get_smu_metrics_data(smu, METRICS_UNIQUE_ID_LOWER32, &lower32))
+ goto out;
+
+out:
+ adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
+ if (adev->serial[0] == '\0')
+ sprintf(adev->serial, "%016llx", adev->unique_id);
+}
+
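Assembling the 64-bit unique id from the two 32-bit metrics halves is plain bit arithmetic; a runnable sketch of the same composition and the %016llx serial formatting, with illustrative values:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t upper32 = 0x12345678u, lower32 = 0x9abcdef0u;
            uint64_t unique_id = ((uint64_t)upper32 << 32) | lower32;
            char serial[20];

            /* Zero-padded 16-digit hex, as written into adev->serial. */
            snprintf(serial, sizeof(serial), "%016" PRIx64, unique_id);
            printf("unique_id=%s\n", serial);
            return 0;
    }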
static int sienna_cichlid_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states)
{
uint32_t num_discrete_levels = 0;
@@ -4182,6 +4212,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_ecc_info = sienna_cichlid_get_ecc_info,
.get_default_config_table_settings = sienna_cichlid_get_default_config_table_settings,
.set_config_table = sienna_cichlid_set_config_table,
+ .get_unique_id = sienna_cichlid_get_unique_id,
};
void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index cd81f848d45a..38af648cb857 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -650,6 +650,12 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
case METRICS_THROTTLER_STATUS:
*value = metrics->ThrottlerStatus;
break;
+ case METRICS_UNIQUE_ID_UPPER32:
+ *value = metrics->PublicSerialNumUpper32;
+ break;
+ case METRICS_UNIQUE_ID_LOWER32:
+ *value = metrics->PublicSerialNumLower32;
+ break;
default:
*value = UINT_MAX;
break;
@@ -1614,16 +1620,12 @@ static void aldebaran_i2c_control_fini(struct smu_context *smu)
static void aldebaran_get_unique_id(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- SmuMetrics_t *metrics = smu->smu_table.metrics_table;
uint32_t upper32 = 0, lower32 = 0;
- int ret;
- ret = smu_cmn_get_metrics_table(smu, NULL, false);
- if (ret)
+ if (aldebaran_get_smu_metrics_data(smu, METRICS_UNIQUE_ID_UPPER32, &upper32))
+ goto out;
+ if (aldebaran_get_smu_metrics_data(smu, METRICS_UNIQUE_ID_LOWER32, &lower32))
goto out;
-
- upper32 = metrics->PublicSerialNumUpper32;
- lower32 = metrics->PublicSerialNumLower32;
out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index b8d0c70ff668..f12319883a80 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -356,9 +356,11 @@ int smu_cmn_wait_for_response(struct smu_context *smu)
* completion of the command, and return back a value from the SMU in
* @read_arg pointer.
*
- * Return 0 on success, -errno on error, if we weren't able to send
- * the message or if the message completed with some kind of
- * error. See __smu_cmn_reg2errno() for details of the -errno.
+ * Return 0 on success, -errno when a problem is encountered while sending
+ * the message or receiving the reply. If there is a PCI bus recovery or
+ * the destination is a virtual GPU which does not allow this message
+ * type, the message is simply dropped and success is also returned.
+ * See __smu_cmn_reg2errno() for details of the -errno.
*
* If we weren't able to send the message to the SMU, we also print
* the error to the standard log.
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index f4df344509a8..9a2cfab3a177 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -214,29 +214,6 @@ int drm_of_encoder_active_endpoint(struct device_node *node,
}
EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
-static int find_panel_or_bridge(struct device_node *node,
- struct drm_panel **panel,
- struct drm_bridge **bridge)
-{
- if (panel) {
- *panel = of_drm_find_panel(node);
- if (!IS_ERR(*panel))
- return 0;
-
- /* Clear the panel pointer in case of error. */
- *panel = NULL;
- }
-
- /* No panel found yet, check for a bridge next. */
- if (bridge) {
- *bridge = of_drm_find_bridge(node);
- if (*bridge)
- return 0;
- }
-
- return -EPROBE_DEFER;
-}
-
/**
* drm_of_find_panel_or_bridge - return connected panel or bridge device
* @np: device tree node containing encoder output ports
@@ -259,44 +236,49 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
struct drm_panel **panel,
struct drm_bridge **bridge)
{
- struct device_node *node;
- int ret;
+ int ret = -EPROBE_DEFER;
+ struct device_node *remote;
if (!panel && !bridge)
return -EINVAL;
-
if (panel)
*panel = NULL;
- if (bridge)
- *bridge = NULL;
-
- /* Check for a graph on the device node first. */
- if (of_graph_is_present(np)) {
- node = of_graph_get_remote_node(np, port, endpoint);
- if (node) {
- ret = find_panel_or_bridge(node, panel, bridge);
- of_node_put(node);
-
- if (!ret)
- return 0;
- }
- }
- /* Otherwise check for any child node other than port/ports. */
- for_each_available_child_of_node(np, node) {
- if (of_node_name_eq(node, "port") ||
- of_node_name_eq(node, "ports"))
- continue;
+	/*
+	 * of_graph_get_remote_node() produces a noisy error message if the
+	 * port node isn't found, and the absence of the port is a legitimate
+	 * case here, so first we silently check whether a graph is present
+	 * in the device-tree node.
+	 */
+ if (!of_graph_is_present(np))
+ return -ENODEV;
- ret = find_panel_or_bridge(node, panel, bridge);
- of_node_put(node);
+ remote = of_graph_get_remote_node(np, port, endpoint);
+ if (!remote)
+ return -ENODEV;
+
+ if (panel) {
+ *panel = of_drm_find_panel(remote);
+ if (!IS_ERR(*panel))
+ ret = 0;
+ else
+ *panel = NULL;
+ }
+
+ /* No panel found yet, check for a bridge next. */
+ if (bridge) {
+ if (ret) {
+ *bridge = of_drm_find_bridge(remote);
+ if (*bridge)
+ ret = 0;
+ } else {
+ *bridge = NULL;
+ }
- /* Stop at the first found occurrence. */
- if (!ret)
- return 0;
}
- return -EPROBE_DEFER;
+ of_node_put(remote);
+ return ret;
}
EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge);
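A hedged sketch of a typical caller in an encoder driver's probe path; the driver and port/endpoint values are illustrative, but the signature matches the function above, and -EPROBE_DEFER is the usual cue for the driver core to retry probing later:

    static int my_encoder_probe(struct platform_device *pdev) /* hypothetical */
    {
            struct drm_panel *panel;
            struct drm_bridge *bridge;
            int ret;

            /* Port 1, endpoint 0: board-specific, purely illustrative. */
            ret = drm_of_find_panel_or_bridge(pdev->dev.of_node, 1, 0,
                                              &panel, &bridge);
            if (ret)
                    return ret; /* -EPROBE_DEFER, -ENODEV or -EINVAL */

            /* On success exactly one of panel/bridge is non-NULL. */
            return 0;
    }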
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 06911f783f56..0f2837f07741 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -4,7 +4,7 @@ config DRM_I915
depends on DRM
depends on X86 && PCI
depends on !PREEMPT_RT
- select INTEL_GTT
+ select INTEL_GTT if X86
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
@@ -33,6 +33,7 @@ config DRM_I915
select VMAP_PFN
select DRM_TTM
select DRM_BUDDY
+ select AUXILIARY_BUS
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 815589f8658c..cd0bf6806228 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -33,6 +33,7 @@ subdir-ccflags-y += -I$(srctree)/$(src)
# core driver code
i915-y += i915_driver.o \
+ i915_drm_client.o \
i915_config.o \
i915_getparam.o \
i915_ioctl.o \
@@ -106,6 +107,8 @@ gt-y += \
gt/intel_gt_pm_debugfs.o \
gt/intel_gt_pm_irq.o \
gt/intel_gt_requests.o \
+ gt/intel_gt_sysfs.o \
+ gt/intel_gt_sysfs_pm.o \
gt/intel_gtt.o \
gt/intel_llc.o \
gt/intel_lrc.o \
@@ -125,6 +128,8 @@ gt-y += \
gt/intel_workarounds.o \
gt/shmem_utils.o \
gt/sysfs_engines.o
+# x86 intel-gtt module support
+gt-$(CONFIG_X86) += gt/intel_gt_gmch.o
# autogenerated null render state
gt-y += \
gt/gen6_renderstate.o \
@@ -185,9 +190,11 @@ i915-y += gt/uc/intel_uc.o \
gt/uc/intel_uc_fw.o \
gt/uc/intel_guc.o \
gt/uc/intel_guc_ads.o \
+ gt/uc/intel_guc_capture.o \
gt/uc/intel_guc_ct.o \
gt/uc/intel_guc_debugfs.o \
gt/uc/intel_guc_fw.o \
+ gt/uc/intel_guc_hwconfig.o \
gt/uc/intel_guc_log.o \
gt/uc/intel_guc_log_debugfs.o \
gt/uc/intel_guc_rc.o \
@@ -197,6 +204,9 @@ i915-y += gt/uc/intel_uc.o \
gt/uc/intel_huc_debugfs.o \
gt/uc/intel_huc_fw.o
+# graphics system controller (GSC) support
+i915-y += gt/intel_gsc.o
+
# modesetting core code
i915-y += \
display/hsw_ips.o \
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 97cf3cac0105..fb6cf30ee628 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -97,6 +97,14 @@
#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1 0x359
+enum intel_dp_aux_backlight_modparam {
+ INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
+ INTEL_DP_AUX_BACKLIGHT_OFF = 0,
+ INTEL_DP_AUX_BACKLIGHT_ON = 1,
+ INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
+ INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
+};
+
/* Intel EDP backlight callbacks */
static bool
intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
@@ -126,6 +134,24 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
return false;
}
+	/*
+	 * If we don't have HDR static metadata there is no way to
+	 * detect at runtime the range used for nits-based control. For
+	 * now, do not use the Intel proprietary eDP backlight control
+	 * if this data is missing from the panel EDID. If we ever find
+	 * a panel which supports only nits-based control but doesn't
+	 * provide HDR static metadata, we will need to start maintaining
+	 * a table of ranges for such panels.
+	 */
+ if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
+ !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
+ BIT(HDMI_STATIC_METADATA_TYPE1))) {
+ drm_info(&i915->drm,
+ "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+ INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
+ return false;
+ }
+
panel->backlight.edp.intel.sdr_uses_aux =
tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP;
@@ -413,14 +439,6 @@ static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = {
.get = intel_dp_aux_vesa_get_backlight,
};
-enum intel_dp_aux_backlight_modparam {
- INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
- INTEL_DP_AUX_BACKLIGHT_OFF = 0,
- INTEL_DP_AUX_BACKLIGHT_ON = 1,
- INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
- INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
-};
-
int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 15b2716172f7..fb0e7e79e0cd 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -300,5 +300,5 @@ void intel_dpt_destroy(struct i915_address_space *vm)
{
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- i915_vm_close(&dpt->vm);
+ i915_vm_put(&dpt->vm);
}
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index e94923e9dbb1..9f5a6b79e95b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -2029,7 +2029,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
/* object is backed with LMEM for discrete */
i915 = to_i915(obj->base.dev);
- if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) {
+ if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
/* object is "remote", not in local memory */
i915_gem_object_put(obj);
return ERR_PTR(-EREMOTE);
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index a307b4993bcf..bd6e7c98e751 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -140,7 +140,7 @@ retry:
if (!ret && phys_cursor)
ret = i915_gem_object_attach_phys(obj, alignment);
else if (!ret && HAS_LMEM(dev_priv))
- ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
+ ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0);
/* TODO: Do we need to sync when migration becomes async? */
if (!ret)
ret = i915_gem_object_pin_pages(obj);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 670835318a1f..ff303c7d3a57 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -1045,7 +1045,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
const struct intel_crtc_state *crtc_state;
struct intel_fbc *fbc = plane->fbc;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 2cd62a187df3..221336178991 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -279,7 +279,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* Our framebuffer is the entirety of fbdev's system memory */
info->fix.smem_start =
(unsigned long)(ggtt->gmadr.start + vma->node.start);
- info->fix.smem_len = vma->node.size;
+ info->fix.smem_len = vma->size;
}
vaddr = i915_vma_pin_iomap(vma);
@@ -290,7 +290,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unpin;
}
info->screen_base = vaddr;
- info->screen_size = vma->node.size;
+ info->screen_size = vma->size;
drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index e207d12286b5..d10f27d0b7b0 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -3,6 +3,7 @@
* Copyright © 2021 Intel Corporation
*/
+#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "intel_atomic_plane.h"
#include "intel_display.h"
@@ -46,16 +47,55 @@ static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
struct intel_initial_plane_config *plane_config)
{
- struct intel_memory_region *mem = i915->mm.stolen_region;
+ struct intel_memory_region *mem;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ resource_size_t phys_base;
u32 base, size;
+ u64 pinctl;
- if (!mem || plane_config->size == 0)
+ if (plane_config->size == 0)
+ return NULL;
+
+ base = round_down(plane_config->base, I915_GTT_MIN_ALIGNMENT);
+ if (IS_DGFX(i915)) {
+ gen8_pte_t __iomem *gte = to_gt(i915)->ggtt->gsm;
+ gen8_pte_t pte;
+
+ gte += base / I915_GTT_PAGE_SIZE;
+
+ pte = ioread64(gte);
+ if (!(pte & GEN12_GGTT_PTE_LM)) {
+ drm_err(&i915->drm,
+ "Initial plane programming missing PTE_LM bit\n");
+ return NULL;
+ }
+
+ phys_base = pte & I915_GTT_PAGE_MASK;
+ mem = i915->mm.regions[INTEL_REGION_LMEM_0];
+
+ /*
+ * We don't currently expect this to ever be placed in the
+ * stolen portion.
+ */
+ if (phys_base >= resource_size(&mem->region)) {
+ drm_err(&i915->drm,
+ "Initial plane programming using invalid range, phys_base=%pa\n",
+ &phys_base);
+ return NULL;
+ }
+
+ drm_dbg(&i915->drm,
+ "Using phys_base=%pa, based on initial plane programming\n",
+ &phys_base);
+ } else {
+ phys_base = base;
+ mem = i915->mm.stolen_region;
+ }
+
+ if (!mem)
return NULL;
- base = round_down(plane_config->base,
- I915_GTT_MIN_ALIGNMENT);
size = round_up(plane_config->base + plane_config->size,
mem->min_page_size);
size -= base;
@@ -66,10 +106,11 @@ initial_plane_vma(struct drm_i915_private *i915,
* features.
*/
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
+ mem == i915->mm.stolen_region &&
size * 2 > i915->stolen_usable_size)
return NULL;
- obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
+ obj = i915_gem_object_create_region_at(mem, phys_base, size, 0);
if (IS_ERR(obj))
return NULL;
@@ -99,7 +140,10 @@ initial_plane_vma(struct drm_i915_private *i915,
if (IS_ERR(vma))
goto err_obj;
- if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
+ pinctl = PIN_GLOBAL | PIN_OFFSET_FIXED | base;
+ if (HAS_GMCH(i915))
+ pinctl |= PIN_MAPPABLE;
+ if (i915_vma_pin(vma, 0, 0, pinctl))
goto err_obj;
if (i915_gem_object_is_tiled(obj) &&
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 5a55010a9b2f..8ec7c161284b 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -891,6 +891,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ /* Wa_16011303918:adl-p */
+ if (crtc_state->vrr.enable &&
+ IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, not compatible with HW stepping + VRR\n");
+ return false;
+ }
+
+ if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
+ return false;
+ }
+
if (HAS_PSR2_SEL_FETCH(dev_priv)) {
if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
!HAS_PSR_HW_TRACKING(dev_priv)) {
@@ -904,12 +918,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!crtc_state->enable_psr2_sel_fetch &&
IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
- return false;
+ goto unsupported;
}
if (!psr2_granularity_check(intel_dp, crtc_state)) {
drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
- return false;
+ goto unsupported;
}
if (!crtc_state->enable_psr2_sel_fetch &&
@@ -918,25 +932,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
"PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
crtc_hdisplay, crtc_vdisplay,
psr_max_h, psr_max_v);
- return false;
- }
-
- if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
- return false;
- }
-
- /* Wa_16011303918:adl-p */
- if (crtc_state->vrr.enable &&
- IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR2 not enabled, not compatible with HW stepping + VRR\n");
- return false;
+ goto unsupported;
}
tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
return true;
+
+unsupported:
+ crtc_state->enable_psr2_sel_fetch = false;
+ return false;
}
void intel_psr_compute_config(struct intel_dp *intel_dp,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 5db83aaf93ee..ab4c5ab28e4d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1031,23 +1031,44 @@ static void free_engines_rcu(struct rcu_head *rcu)
free_engines(engines);
}
+static void accumulate_runtime(struct i915_drm_client *client,
+ struct i915_gem_engines *engines)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ if (!client)
+ return;
+
+ /* Transfer accumulated runtime to the parent GEM context. */
+ for_each_gem_engine(ce, engines, it) {
+ unsigned int class = ce->engine->uabi_class;
+
+ GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime));
+ atomic64_add(intel_context_get_total_runtime_ns(ce),
+ &client->past_runtime[class]);
+ }
+}
+
static int
engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct i915_gem_engines *engines =
container_of(fence, typeof(*engines), fence);
+ struct i915_gem_context *ctx = engines->ctx;
switch (state) {
case FENCE_COMPLETE:
if (!list_empty(&engines->link)) {
- struct i915_gem_context *ctx = engines->ctx;
unsigned long flags;
spin_lock_irqsave(&ctx->stale.lock, flags);
list_del(&engines->link);
spin_unlock_irqrestore(&ctx->stale.lock, flags);
}
- i915_gem_context_put(engines->ctx);
+ accumulate_runtime(ctx->client, engines);
+ i915_gem_context_put(ctx);
+
break;
case FENCE_FREE:
@@ -1257,6 +1278,9 @@ static void i915_gem_context_release_work(struct work_struct *work)
if (ctx->pxp_wakeref)
intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);
+ if (ctx->client)
+ i915_drm_client_put(ctx->client);
+
mutex_destroy(&ctx->engines_mutex);
mutex_destroy(&ctx->lut_mutex);
@@ -1467,7 +1491,7 @@ static void set_closed_name(struct i915_gem_context *ctx)
static void context_close(struct i915_gem_context *ctx)
{
- struct i915_address_space *vm;
+ struct i915_drm_client *client;
/* Flush any concurrent set_engines() */
mutex_lock(&ctx->engines_mutex);
@@ -1480,19 +1504,6 @@ static void context_close(struct i915_gem_context *ctx)
set_closed_name(ctx);
- vm = ctx->vm;
- if (vm) {
- /* i915_vm_close drops the final reference, which is a bit too
- * early and could result in surprises with concurrent
- * operations racing with thist ctx close. Keep a full reference
- * until the end.
- */
- i915_vm_get(vm);
- i915_vm_close(vm);
- }
-
- ctx->file_priv = ERR_PTR(-EBADF);
-
/*
* The LUT uses the VMA as a backpointer to unref the object,
* so we need to clear the LUT before we close all the VMA (inside
@@ -1500,10 +1511,19 @@ static void context_close(struct i915_gem_context *ctx)
*/
lut_close(ctx);
+ ctx->file_priv = ERR_PTR(-EBADF);
+
spin_lock(&ctx->i915->gem.contexts.lock);
list_del(&ctx->link);
spin_unlock(&ctx->i915->gem.contexts.lock);
+ client = ctx->client;
+ if (client) {
+ spin_lock(&client->ctx_lock);
+ list_del_rcu(&ctx->client_link);
+ spin_unlock(&client->ctx_lock);
+ }
+
mutex_unlock(&ctx->mutex);
/*
@@ -1598,12 +1618,8 @@ i915_gem_create_context(struct drm_i915_private *i915,
}
vm = &ppgtt->vm;
}
- if (vm) {
- ctx->vm = i915_vm_open(vm);
-
- /* i915_vm_open() takes a reference */
- i915_vm_put(vm);
- }
+ if (vm)
+ ctx->vm = vm;
mutex_init(&ctx->engines_mutex);
if (pc->num_user_engines >= 0) {
@@ -1653,7 +1669,7 @@ err_engines:
free_engines(e);
err_vm:
if (ctx->vm)
- i915_vm_close(ctx->vm);
+ i915_vm_put(ctx->vm);
err_ctx:
kfree(ctx);
return ERR_PTR(err);
@@ -1680,6 +1696,8 @@ static void gem_context_register(struct i915_gem_context *ctx,
ctx->file_priv = fpriv;
ctx->pid = get_task_pid(current, PIDTYPE_PID);
+ ctx->client = i915_drm_client_get(fpriv->client);
+
snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
current->comm, pid_nr(ctx->pid));
@@ -1687,6 +1705,10 @@ static void gem_context_register(struct i915_gem_context *ctx,
old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
WARN_ON(old);
+ spin_lock(&ctx->client->ctx_lock);
+ list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
+ spin_unlock(&ctx->client->ctx_lock);
+
spin_lock(&i915->gem.contexts.lock);
list_add_tail(&ctx->link, &i915->gem.contexts.list);
spin_unlock(&i915->gem.contexts.lock);
@@ -1837,7 +1859,7 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
if (err)
return err;
- i915_vm_open(vm);
+ i915_vm_get(vm);
GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
args->value = id;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 282cdb8a5c5a..cb78214a7dcd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -293,6 +293,12 @@ struct i915_gem_context {
/** @link: place with &drm_i915_private.context_list */
struct list_head link;
+ /** @client: struct i915_drm_client */
+ struct i915_drm_client *client;
+
+ /** @client_link: for linking onto &i915_drm_client.ctx_list */
+ struct list_head client_link;
+
/**
* @ref: reference count
*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index c6eb023d3d86..5802692ea604 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -123,7 +123,7 @@ __i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size,
*/
flags = I915_BO_ALLOC_USER;
- ret = mr->ops->init_object(mr, obj, size, 0, flags);
+ ret = mr->ops->init_object(mr, obj, I915_BO_INVALID_OFFSET, size, 0, flags);
if (ret)
goto object_free;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 13917231ae81..f5062d0c6333 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -66,15 +66,6 @@ err:
return ERR_PTR(ret);
}
-static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *sg,
- enum dma_data_direction dir)
-{
- dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
- sg_free_table(sg);
- kfree(sg);
-}
-
static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf,
struct iosys_map *map)
{
@@ -102,11 +93,15 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf,
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
int ret;
if (obj->base.size < vma->vm_end - vma->vm_start)
return -EINVAL;
+ if (HAS_LMEM(i915))
+ return drm_gem_prime_mmap(&obj->base, vma);
+
if (!obj->base.filp)
return -ENODEV;
@@ -209,7 +204,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.attach = i915_gem_dmabuf_attach,
.detach = i915_gem_dmabuf_detach,
.map_dma_buf = i915_gem_map_dma_buf,
- .unmap_dma_buf = i915_gem_unmap_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index fd0e15d9573c..b3383e047505 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1321,10 +1321,8 @@ static void *reloc_vaddr(struct i915_vma *vma,
static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
- if (flushes & CLFLUSH_BEFORE) {
- clflushopt(addr);
- mb();
- }
+ if (flushes & CLFLUSH_BEFORE)
+ drm_clflush_virt_range(addr, sizeof(*addr));
*addr = value;
@@ -1336,7 +1334,7 @@ static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
* to ensure ordering of clflush wrt to the system.
*/
if (flushes & CLFLUSH_AFTER)
- clflushopt(addr);
+ drm_clflush_virt_range(addr, sizeof(*addr));
} else
*addr = value;
}
@@ -2690,6 +2688,11 @@ eb_select_engine(struct i915_execbuffer *eb)
if (err)
goto err;
+ if (!i915_vm_tryget(ce->vm)) {
+ err = -ENOENT;
+ goto err;
+ }
+
eb->context = ce;
eb->gt = ce->engine->gt;
@@ -2713,6 +2716,7 @@ eb_put_engine(struct i915_execbuffer *eb)
{
struct intel_context *child;
+ i915_vm_put(eb->context->vm);
intel_gt_pm_put(eb->gt);
for_each_child(eb->context, child)
intel_context_put(child);
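The tryget/put pairing above guards against racing with final teardown: a plain get on a refcount that already hit zero would resurrect a dying object, whereas tryget fails safely and the submission is rejected with -ENOENT. A minimal sketch of the pattern using a generic kref (illustrative stand-in, not the i915 implementation):

    #include <linux/kref.h>

    struct my_vm {                  /* hypothetical address-space object */
            struct kref ref;
    };

    static bool my_vm_tryget(struct my_vm *vm)
    {
            /* Returns false if the refcount already dropped to zero. */
            return kref_get_unless_zero(&vm->ref);
    }

    static void my_vm_release(struct kref *ref)
    {
            /* free resources here */
    }

    static void my_vm_put(struct my_vm *vm)
    {
            kref_put(&vm->ref, my_vm_release);
    }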
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 02ed3b269326..8949fb0a944f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -102,7 +102,7 @@ __i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
resource_size_t page_size,
unsigned int flags)
{
- return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0],
size, page_size, flags);
}
@@ -137,6 +137,6 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
unsigned int flags)
{
- return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
+ return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0],
size, 0, flags);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c3ea243d414d..0c5c43852e24 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -70,7 +70,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
* mmap ioctl is disallowed for all discrete platforms,
* and for all platforms with GRAPHICS_VER > 12.
*/
- if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12)
+ if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
return -EOPNOTSUPP;
if (args->flags & ~(I915_MMAP_WC))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 747ac65e060f..06b1b188ce5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -606,6 +606,9 @@ bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
if (!mr)
return false;
+ if (!IS_ALIGNED(obj->base.size, mr->min_page_size))
+ return false;
+
if (obj->mm.region == mr)
return true;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index fd54eb8f4826..2c88bdb8ff7c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -631,6 +631,8 @@ struct drm_i915_gem_object {
struct drm_mm_node *stolen;
+ resource_size_t bo_offset;
+
unsigned long scratch;
u64 encode;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 5866c88e4d02..f46ee16a323a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -29,11 +29,12 @@ void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
mutex_unlock(&mem->objects.lock);
}
-struct drm_i915_gem_object *
-i915_gem_object_create_region(struct intel_memory_region *mem,
- resource_size_t size,
- resource_size_t page_size,
- unsigned int flags)
+static struct drm_i915_gem_object *
+__i915_gem_object_create_region(struct intel_memory_region *mem,
+ resource_size_t offset,
+ resource_size_t size,
+ resource_size_t page_size,
+ unsigned int flags)
{
struct drm_i915_gem_object *obj;
resource_size_t default_page_size;
@@ -64,6 +65,9 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
size = round_up(size, default_page_size);
+ if (default_page_size == size)
+ flags |= I915_BO_ALLOC_CONTIGUOUS;
+
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
@@ -85,7 +89,7 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
if (default_page_size < mem->min_page_size)
flags |= I915_BO_ALLOC_PM_EARLY;
- err = mem->ops->init_object(mem, obj, size, page_size, flags);
+ err = mem->ops->init_object(mem, obj, offset, size, page_size, flags);
if (err)
goto err_object_free;
@@ -97,6 +101,40 @@ err_object_free:
return ERR_PTR(err);
}
+struct drm_i915_gem_object *
+i915_gem_object_create_region(struct intel_memory_region *mem,
+ resource_size_t size,
+ resource_size_t page_size,
+ unsigned int flags)
+{
+ return __i915_gem_object_create_region(mem, I915_BO_INVALID_OFFSET,
+ size, page_size, flags);
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_region_at(struct intel_memory_region *mem,
+ resource_size_t offset,
+ resource_size_t size,
+ unsigned int flags)
+{
+ GEM_BUG_ON(offset == I915_BO_INVALID_OFFSET);
+
+ if (GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) ||
+ GEM_WARN_ON(!IS_ALIGNED(offset, mem->min_page_size)))
+ return ERR_PTR(-EINVAL);
+
+ if (range_overflows(offset, size, resource_size(&mem->region)))
+ return ERR_PTR(-EINVAL);
+
+ if (!(flags & I915_BO_ALLOC_GPU_ONLY) &&
+ offset + size > mem->io_size &&
+ !i915_ggtt_has_aperture(to_gt(mem->i915)->ggtt))
+ return ERR_PTR(-ENOSPC);
+
+ return __i915_gem_object_create_region(mem, offset, size, 0,
+ flags | I915_BO_ALLOC_CONTIGUOUS);
+}
+
/**
* i915_gem_process_region - Iterate over all objects of a region using ops
* to process and optionally skip objects
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index fcaa12d657d4..2dfcc41c0170 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -14,6 +14,8 @@ struct sg_table;
struct i915_gem_apply_to_region;
+#define I915_BO_INVALID_OFFSET ((resource_size_t)-1)
+
/**
* struct i915_gem_apply_to_region_ops - ops to use when iterating over all
* region objects.
@@ -56,6 +58,11 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
resource_size_t size,
resource_size_t page_size,
unsigned int flags);
+struct drm_i915_gem_object *
+i915_gem_object_create_region_at(struct intel_memory_region *mem,
+ resource_size_t offset,
+ resource_size_t size,
+ unsigned int flags);
int i915_gem_process_region(struct intel_memory_region *mr,
struct i915_gem_apply_to_region *apply);
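A hedged sketch of how a caller might claim a firmware-programmed range with the new helper, mirroring the initial-plane use earlier in this series; the offset and size values are illustrative and must respect the region's min_page_size alignment:

    /* Hypothetical caller: reserve a preallocated range in a region. */
    static struct drm_i915_gem_object *
    grab_prealloc(struct intel_memory_region *mem)
    {
            resource_size_t offset = SZ_1M; /* illustrative */
            resource_size_t size = SZ_8M;   /* illustrative */

            /*
             * Fails with -EINVAL on misalignment or overflow, -ENOSPC if
             * the range is CPU-inaccessible without a mappable aperture.
             */
            return i915_gem_object_create_region_at(mem, offset, size, 0);
    }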
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index c7541dc687c1..c2a3e388fcb4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -553,6 +553,7 @@ static int __create_shmem(struct drm_i915_private *i915,
static int shmem_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
+ resource_size_t offset,
resource_size_t size,
resource_size_t page_size,
unsigned int flags)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 2a225daa2b12..47b5e0e342ab 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -12,6 +12,8 @@
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_reg.h"
@@ -493,7 +495,7 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
/* Exclude the reserved region from driver use */
mem->region.end = reserved_base - 1;
- mem->io_size = resource_size(&mem->region);
+ mem->io_size = min(mem->io_size, resource_size(&mem->region));
/* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */
@@ -680,6 +682,7 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
+ resource_size_t offset,
resource_size_t size,
resource_size_t page_size,
unsigned int flags)
@@ -694,12 +697,32 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
if (size == 0)
return -EINVAL;
+ /*
+ * On discrete devices that lack a mappable aperture, there is no
+ * possible way to ever access this memory on the CPU side.
+ */
+ if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size &&
+ !(flags & I915_BO_ALLOC_GPU_ONLY))
+ return -ENOSPC;
+
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
return -ENOMEM;
- ret = i915_gem_stolen_insert_node(i915, stolen, size,
- mem->min_page_size);
+ if (offset != I915_BO_INVALID_OFFSET) {
+ drm_dbg(&i915->drm,
+ "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
+ &offset, &size);
+
+ stolen->start = offset;
+ stolen->size = size;
+ mutex_lock(&i915->mm.stolen_lock);
+ ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
+ mutex_unlock(&i915->mm.stolen_lock);
+ } else {
+ ret = i915_gem_stolen_insert_node(i915, stolen, size,
+ mem->min_page_size);
+ }
if (ret)
goto err_free;
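To illustrate the branch added above: a caller-supplied offset pins the stolen node at that exact address (the drm_mm_reserve_node() path), while I915_BO_INVALID_OFFSET falls back to searching for free space. A toy bump allocator stands in for drm_mm in this sketch; it is not the kernel API.

#include <stdint.h>
#include <stdio.h>

#define INVALID_OFFSET ((uint64_t)-1)

/* Toy allocator modelling drm_mm: tracks a single high-water mark. */
struct toy_mm { uint64_t next, end; };

static int toy_reserve(struct toy_mm *mm, uint64_t start, uint64_t size)
{
        if (start < mm->next || start + size > mm->end)
                return -1;      /* requested range busy or out of bounds */
        mm->next = start + size;
        return 0;
}

static int toy_insert(struct toy_mm *mm, uint64_t size, uint64_t *start)
{
        if (mm->next + size > mm->end)
                return -1;      /* no free space left */
        *start = mm->next;
        mm->next += size;
        return 0;
}

int main(void)
{
        struct toy_mm mm = { .next = 0, .end = 1 << 20 };
        uint64_t offset = 0x40000, size = 0x10000, start;

        /* offset != INVALID_OFFSET selects the reserve path, as above. */
        if (offset != INVALID_OFFSET) {
                if (toy_reserve(&mm, offset, size))
                        return 1;
                start = offset;
        } else if (toy_insert(&mm, size, &start)) {
                return 1;
        }
        printf("stolen node at %#llx\n", (unsigned long long)start);
        return 0;
}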
@@ -751,11 +774,6 @@ static int init_stolen_lmem(struct intel_memory_region *mem)
if (GEM_WARN_ON(resource_size(&mem->region) == 0))
return -ENODEV;
- if (!io_mapping_init_wc(&mem->iomap,
- mem->io_start,
- mem->io_size))
- return -EIO;
-
/*
* TODO: For stolen lmem we mostly just care about populating the dsm
* related bits and setting up the drm_mm allocator for the range.
@@ -763,18 +781,26 @@ static int init_stolen_lmem(struct intel_memory_region *mem)
*/
err = i915_gem_init_stolen(mem);
if (err)
- goto err_fini;
+ return err;
+
+ if (mem->io_size && !io_mapping_init_wc(&mem->iomap,
+ mem->io_start,
+ mem->io_size)) {
+ err = -EIO;
+ goto err_cleanup;
+ }
return 0;
-err_fini:
- io_mapping_fini(&mem->iomap);
+err_cleanup:
+ i915_gem_cleanup_stolen(mem->i915);
return err;
}
static int release_stolen_lmem(struct intel_memory_region *mem)
{
- io_mapping_fini(&mem->iomap);
+ if (mem->io_size)
+ io_mapping_fini(&mem->iomap);
i915_gem_cleanup_stolen(mem->i915);
return 0;
}
@@ -791,25 +817,43 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
{
struct intel_uncore *uncore = &i915->uncore;
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ resource_size_t dsm_size, dsm_base, lmem_size;
struct intel_memory_region *mem;
+ resource_size_t io_start, io_size;
resource_size_t min_page_size;
- resource_size_t io_start;
- resource_size_t lmem_size;
- u64 lmem_base;
- lmem_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
- if (GEM_WARN_ON(lmem_base >= pci_resource_len(pdev, 2)))
+ if (WARN_ON_ONCE(instance))
return ERR_PTR(-ENODEV);
- lmem_size = pci_resource_len(pdev, 2) - lmem_base;
- io_start = pci_resource_start(pdev, 2) + lmem_base;
+ /* Use the DSM base address for stolen memory instead */
+ dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
+ if (IS_DG1(uncore->i915)) {
+ lmem_size = pci_resource_len(pdev, 2);
+ if (WARN_ON(lmem_size < dsm_base))
+ return ERR_PTR(-ENODEV);
+ } else {
+ resource_size_t lmem_range;
+
+ lmem_range = intel_gt_read_register(&i915->gt0, XEHPSDV_TILE0_ADDR_RANGE) & 0xFFFF;
+ lmem_size = lmem_range >> XEHPSDV_TILE_LMEM_RANGE_SHIFT;
+ lmem_size *= SZ_1G;
+ }
+
+ dsm_size = lmem_size - dsm_base;
+ if (pci_resource_len(pdev, 2) < lmem_size) {
+ io_start = 0;
+ io_size = 0;
+ } else {
+ io_start = pci_resource_start(pdev, 2) + dsm_base;
+ io_size = dsm_size;
+ }
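The window arithmetic above reduces to a few lines; this sketch reruns it with made-up numbers (8G of lmem with the DSM in the top 64M, and a 4G BAR2, i.e. a small-BAR part where stolen local memory ends up with no CPU-visible IO window at all).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical layout, not real platform values. */
        uint64_t lmem_size = 8ull << 30;
        uint64_t dsm_base  = lmem_size - (64ull << 20);
        uint64_t bar2_len  = 4ull << 30;        /* pci_resource_len(pdev, 2) */
        uint64_t bar2_base = 0x4000000000ull;   /* pci_resource_start(pdev, 2) */

        uint64_t dsm_size = lmem_size - dsm_base;
        uint64_t io_start, io_size;

        if (bar2_len < lmem_size) {
                /* small-BAR: stolen lmem is not CPU visible at all */
                io_start = 0;
                io_size = 0;
        } else {
                io_start = bar2_base + dsm_base;
                io_size = dsm_size;
        }
        printf("dsm=[%#llx +%#llx] io=[%#llx +%#llx]\n",
               (unsigned long long)dsm_base, (unsigned long long)dsm_size,
               (unsigned long long)io_start, (unsigned long long)io_size);
        return 0;
}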
min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
I915_GTT_PAGE_SIZE_4K;
- mem = intel_memory_region_create(i915, lmem_base, lmem_size,
+ mem = intel_memory_region_create(i915, dsm_base, dsm_size,
min_page_size,
- io_start, lmem_size,
+ io_start, io_size,
type, instance,
&i915_region_stolen_lmem_ops);
if (IS_ERR(mem))
@@ -823,6 +867,7 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
&mem->io_start);
+ drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &dsm_base);
intel_memory_region_set_name(mem, "stolen-local");
@@ -851,63 +896,6 @@ i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
return mem;
}
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
- resource_size_t stolen_offset,
- resource_size_t size)
-{
- struct intel_memory_region *mem = i915->mm.stolen_region;
- struct drm_i915_gem_object *obj;
- struct drm_mm_node *stolen;
- int ret;
-
- if (!drm_mm_initialized(&i915->mm.stolen))
- return ERR_PTR(-ENODEV);
-
- drm_dbg(&i915->drm,
- "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
- &stolen_offset, &size);
-
- /* KISS and expect everything to be page-aligned */
- if (GEM_WARN_ON(size == 0) ||
- GEM_WARN_ON(!IS_ALIGNED(size, mem->min_page_size)) ||
- GEM_WARN_ON(!IS_ALIGNED(stolen_offset, mem->min_page_size)))
- return ERR_PTR(-EINVAL);
-
- stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
- if (!stolen)
- return ERR_PTR(-ENOMEM);
-
- stolen->start = stolen_offset;
- stolen->size = size;
- mutex_lock(&i915->mm.stolen_lock);
- ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
- mutex_unlock(&i915->mm.stolen_lock);
- if (ret)
- goto err_free;
-
- obj = i915_gem_object_alloc();
- if (!obj) {
- ret = -ENOMEM;
- goto err_stolen;
- }
-
- ret = __i915_gem_object_create_stolen(mem, obj, stolen);
- if (ret)
- goto err_object_free;
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
- return obj;
-
-err_object_free:
- i915_gem_object_free(obj);
-err_stolen:
- i915_gem_stolen_remove_node(i915, stolen);
-err_free:
- kfree(stolen);
- return ERR_PTR(ret);
-}
-
bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
return obj->ops == &i915_gem_object_stolen_ops;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index ccdf7befc571..d5005a39d130 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -31,10 +31,6 @@ i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
resource_size_t size);
-struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
- resource_size_t stolen_offset,
- resource_size_t size);
bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index b9ae6b02f304..4c25d9b2f138 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -20,6 +20,7 @@
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"
+#include "gt/intel_gpu_commands.h"
#define I915_TTM_PRIO_PURGE 0
#define I915_TTM_PRIO_NO_PAGES 1
@@ -126,14 +127,22 @@ i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
static void
i915_ttm_place_from_region(const struct intel_memory_region *mr,
struct ttm_place *place,
+ resource_size_t offset,
+ resource_size_t size,
unsigned int flags)
{
memset(place, 0, sizeof(*place));
place->mem_type = intel_region_to_ttm_type(mr);
+ if (mr->type == INTEL_MEMORY_SYSTEM)
+ return;
+
if (flags & I915_BO_ALLOC_CONTIGUOUS)
place->flags |= TTM_PL_FLAG_CONTIGUOUS;
- if (mr->io_size && mr->io_size < mr->total) {
+ if (offset != I915_BO_INVALID_OFFSET) {
+ place->fpfn = offset >> PAGE_SHIFT;
+ place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
+ } else if (mr->io_size && mr->io_size < mr->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place->flags |= TTM_PL_FLAG_TOPDOWN;
} else {
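The fpfn/lpfn conversion above is a plain byte-to-page-frame calculation; a sketch with an assumed 4K PAGE_SIZE and arbitrary offset/size:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4K pages */

int main(void)
{
        uint64_t offset = 0x200000, size = 0x10000;     /* hypothetical */
        uint32_t fpfn = offset >> PAGE_SHIFT;
        uint32_t lpfn = fpfn + (size >> PAGE_SHIFT);

        /* TTM will then allocate only inside page frames [fpfn, lpfn). */
        printf("fpfn=%u lpfn=%u\n", fpfn, lpfn);
        return 0;
}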
@@ -155,12 +164,14 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
placement->num_placement = 1;
i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
- obj->mm.region, requested, flags);
+ obj->mm.region, requested, obj->bo_offset,
+ obj->base.size, flags);
/* Cache this on object? */
placement->num_busy_placement = num_allowed;
for (i = 0; i < placement->num_busy_placement; ++i)
- i915_ttm_place_from_region(obj->mm.placements[i], busy + i, flags);
+ i915_ttm_place_from_region(obj->mm.placements[i], busy + i,
+ obj->bo_offset, obj->base.size, flags);
if (num_allowed == 0) {
*busy = *requested;
@@ -255,12 +266,33 @@ static const struct i915_refct_sgt_ops tt_rsgt_ops = {
.release = i915_ttm_tt_release
};
+static inline bool
+i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
+{
+ bool lmem_placement = false;
+ int i;
+
+ for (i = 0; i < obj->mm.n_placements; i++) {
+ /* Compression is not allowed for objects with an smem placement */
+ if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
+ return false;
+ if (!lmem_placement &&
+ obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL)
+ lmem_placement = true;
+ }
+
+ return lmem_placement;
+}
+
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
+ struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
+ bdev);
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, bo->resource->mem_type);
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ unsigned long ccs_pages = 0;
enum ttm_caching caching;
struct i915_ttm_tt *i915_tt;
int ret;
@@ -283,7 +315,12 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
i915_tt->is_shmem = true;
}
- ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching, 0);
+ if (HAS_FLAT_CCS(i915) && i915_gem_object_needs_ccs_pages(obj))
+ ccs_pages = DIV_ROUND_UP(DIV_ROUND_UP(bo->base.size,
+ NUM_BYTES_PER_CCS_BYTE),
+ PAGE_SIZE);
+
+ ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching, ccs_pages);
if (ret)
goto err_free;
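The CCS sizing above is two rounded divisions; a sketch with a hypothetical 16M object, assuming the 1:256 main-to-CCS byte ratio that NUM_BYTES_PER_CCS_BYTE encodes on flat-CCS parts:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ull
#define NUM_BYTES_PER_CCS_BYTE 256ull   /* assumed: 1 CCS byte per 256 bytes */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        uint64_t bo_size = 16ull << 20;         /* hypothetical 16M object */
        uint64_t ccs_bytes = DIV_ROUND_UP(bo_size, NUM_BYTES_PER_CCS_BYTE);
        uint64_t ccs_pages = DIV_ROUND_UP(ccs_bytes, PAGE_SIZE);

        /* 16M / 256 = 64K of CCS data -> 16 extra ttm_tt pages. */
        printf("%llu extra tt pages for CCS\n", (unsigned long long)ccs_pages);
        return 0;
}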
@@ -763,6 +800,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
i915_sg_dma_sizes(rsgt->table.sgl));
}
+ GEM_BUG_ON(bo->ttm && ((obj->base.size >> PAGE_SHIFT) < bo->ttm->num_pages));
i915_ttm_adjust_lru(obj);
return ret;
}
@@ -802,7 +840,8 @@ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
struct ttm_placement placement;
int ret;
- i915_ttm_place_from_region(mr, &requested, flags);
+ i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
+ obj->base.size, flags);
placement.num_placement = 1;
placement.num_busy_placement = 1;
placement.placement = &requested;
@@ -1142,6 +1181,7 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
*/
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
+ resource_size_t offset,
resource_size_t size,
resource_size_t page_size,
unsigned int flags)
@@ -1158,6 +1198,8 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
+ obj->bo_offset = offset;
+
/* Don't put on a region list until we're either locked or fully initialized. */
obj->mm.region = mem;
INIT_LIST_HEAD(&obj->mm.region_link);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index 9d698ad00853..73e371aa3850 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -45,6 +45,7 @@ i915_ttm_to_gem(struct ttm_buffer_object *bo)
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
+ resource_size_t offset,
resource_size_t size,
resource_size_t page_size,
unsigned int flags);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index b4275b55e5b8..62c61af77a42 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -88,7 +88,7 @@ out:
static int igt_dmabuf_import_same_driver_lmem(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_memory_region *lmem = i915->mm.regions[INTEL_REGION_LMEM];
+ struct intel_memory_region *lmem = i915->mm.regions[INTEL_REGION_LMEM_0];
struct drm_i915_gem_object *obj;
struct drm_gem_object *import;
struct dma_buf *dmabuf;
@@ -253,10 +253,10 @@ static int igt_dmabuf_import_same_driver_lmem_smem(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_memory_region *regions[2];
- if (!i915->mm.regions[INTEL_REGION_LMEM])
+ if (!i915->mm.regions[INTEL_REGION_LMEM_0])
return 0;
- regions[0] = i915->mm.regions[INTEL_REGION_LMEM];
+ regions[0] = i915->mm.regions[INTEL_REGION_LMEM_0];
regions[1] = i915->mm.regions[INTEL_REGION_SMEM];
return igt_dmabuf_import_same_driver(i915, regions, 2);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index 0ad443a90c8b..801af51aff62 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -47,14 +47,16 @@ static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
{
struct drm_i915_private *i915 = gt->i915;
struct intel_memory_region *src_mr = i915->mm.regions[src];
+ struct intel_memory_region *dst_mr = i915->mm.regions[dst];
struct drm_i915_gem_object *obj;
struct i915_gem_ww_ctx ww;
int err = 0;
GEM_BUG_ON(!src_mr);
+ GEM_BUG_ON(!dst_mr);
/* Switch object backing-store on create */
- obj = i915_gem_object_create_region(src_mr, PAGE_SIZE, 0, 0);
+ obj = i915_gem_object_create_region(src_mr, dst_mr->min_page_size, 0, 0);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -92,17 +94,17 @@ static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
static int igt_smem_create_migrate(void *arg)
{
- return igt_create_migrate(arg, INTEL_REGION_LMEM, INTEL_REGION_SMEM);
+ return igt_create_migrate(arg, INTEL_REGION_LMEM_0, INTEL_REGION_SMEM);
}
static int igt_lmem_create_migrate(void *arg)
{
- return igt_create_migrate(arg, INTEL_REGION_SMEM, INTEL_REGION_LMEM);
+ return igt_create_migrate(arg, INTEL_REGION_SMEM, INTEL_REGION_LMEM_0);
}
static int igt_same_create_migrate(void *arg)
{
- return igt_create_migrate(arg, INTEL_REGION_LMEM, INTEL_REGION_LMEM);
+ return igt_create_migrate(arg, INTEL_REGION_LMEM_0, INTEL_REGION_LMEM_0);
}
static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
@@ -152,7 +154,7 @@ static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
}
} else {
- err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM);
+ err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM_0);
if (err) {
pr_err("Object failed migration to lmem\n");
if (err)
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 6d6082b5f31f..8ac6726ec16b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -42,8 +42,7 @@ mock_context(struct drm_i915_private *i915,
if (!ppgtt)
goto err_free;
- ctx->vm = i915_vm_open(&ppgtt->vm);
- i915_vm_put(&ppgtt->vm);
+ ctx->vm = &ppgtt->vm;
}
mutex_init(&ctx->engines_mutex);
@@ -59,7 +58,7 @@ mock_context(struct drm_i915_private *i915,
err_vm:
if (ctx->vm)
- i915_vm_close(ctx->vm);
+ i915_vm_put(ctx->vm);
err_free:
kfree(ctx);
return NULL;
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 871fe7bda0e0..1bb766c79dcb 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -322,7 +322,7 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww)
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
int err;
- GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));
+ GEM_BUG_ON(!kref_read(&ppgtt->base.vm.ref));
/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index b1b9c3fd7bf9..9529c5455bc3 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -6,7 +6,6 @@
#include "gen8_engine_cs.h"
#include "i915_drv.h"
#include "intel_gpu_commands.h"
-#include "intel_gt_regs.h"
#include "intel_lrc.h"
#include "intel_ring.h"
@@ -165,33 +164,9 @@ static u32 preparser_disable(bool state)
return MI_ARB_CHECK | 1 << 8 | state;
}
-static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine)
+u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg)
{
- static const i915_reg_t vd[] = {
- GEN12_VD0_AUX_NV,
- GEN12_VD1_AUX_NV,
- GEN12_VD2_AUX_NV,
- GEN12_VD3_AUX_NV,
- };
-
- static const i915_reg_t ve[] = {
- GEN12_VE0_AUX_NV,
- GEN12_VE1_AUX_NV,
- };
-
- if (engine->class == VIDEO_DECODE_CLASS)
- return vd[engine->instance];
-
- if (engine->class == VIDEO_ENHANCEMENT_CLASS)
- return ve[engine->instance];
-
- GEM_BUG_ON("unknown aux_inv reg\n");
- return INVALID_MMIO_REG;
-}
-
-static u32 *gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs)
-{
- *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
*cs++ = i915_mmio_reg_offset(inv_reg);
*cs++ = AUX_INV;
*cs++ = MI_NOOP;
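The new helper emits a fixed four-dword LRI sequence. A sketch of the same shape, with placeholder opcode values rather than the real MI encodings:

#include <stdint.h>
#include <stdio.h>

/* Placeholder values for illustration only, not the real MI encodings. */
#define MI_LOAD_REGISTER_IMM_1  0x11000001u
#define MI_LRI_MMIO_REMAP_EN    0x00020000u
#define AUX_INV                 0x00000001u
#define MI_NOOP                 0x00000000u

static uint32_t *emit_aux_table_inv(uint32_t *cs, uint32_t inv_reg)
{
        *cs++ = MI_LOAD_REGISTER_IMM_1 | MI_LRI_MMIO_REMAP_EN;
        *cs++ = inv_reg;        /* mmio offset of the AUX_NV register */
        *cs++ = AUX_INV;
        *cs++ = MI_NOOP;        /* keep the command sequence qword aligned */
        return cs;
}

int main(void)
{
        uint32_t ring[4];
        uint32_t *end = emit_aux_table_inv(ring, 0x4208 /* fake offset */);

        printf("emitted %zu dwords\n", (size_t)(end - ring));
        return 0;
}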
@@ -236,7 +211,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
if (mode & EMIT_INVALIDATE) {
u32 flags = 0;
- u32 *cs;
+ u32 *cs, count;
flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TLB_INVALIDATE;
@@ -254,7 +229,12 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
if (engine->class == COMPUTE_CLASS)
flags &= ~PIPE_CONTROL_3D_FLAGS;
- cs = intel_ring_begin(rq, 8 + 4);
+ if (!HAS_FLAT_CCS(rq->engine->i915))
+ count = 8 + 4;
+ else
+ count = 8;
+
+ cs = intel_ring_begin(rq, count);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -267,8 +247,10 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
- /* hsdes: 1809175790 */
- cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs);
+ if (!HAS_FLAT_CCS(rq->engine->i915)) {
+ /* hsdes: 1809175790 */
+ cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV);
+ }
*cs++ = preparser_disable(false);
intel_ring_advance(rq, cs);
@@ -283,12 +265,17 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
u32 cmd, *cs;
cmd = 4;
- if (mode & EMIT_INVALIDATE)
+ if (mode & EMIT_INVALIDATE) {
cmd += 2;
- if (mode & EMIT_INVALIDATE)
- aux_inv = rq->engine->mask & ~BIT(BCS0);
- if (aux_inv)
- cmd += 2 * hweight32(aux_inv) + 2;
+
+ if (!HAS_FLAT_CCS(rq->engine->i915) &&
+ (rq->engine->class == VIDEO_DECODE_CLASS ||
+ rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) {
+ aux_inv = rq->engine->mask & ~BIT(BCS0);
+ if (aux_inv)
+ cmd += 4;
+ }
+ }
cs = intel_ring_begin(rq, cmd);
if (IS_ERR(cs))
@@ -319,15 +306,10 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
*cs++ = 0; /* value */
if (aux_inv) { /* hsdes: 1809175790 */
- struct intel_engine_cs *engine;
- unsigned int tmp;
-
- *cs++ = MI_LOAD_REGISTER_IMM(hweight32(aux_inv));
- for_each_engine_masked(engine, rq->engine->gt, aux_inv, tmp) {
- *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
- *cs++ = AUX_INV;
- }
- *cs++ = MI_NOOP;
+ if (rq->engine->class == VIDEO_DECODE_CLASS)
+ cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV);
+ else
+ cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV);
}
if (mode & EMIT_INVALIDATE)
@@ -601,6 +583,43 @@ static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
return cs;
}
+/* Wa_14014475959:dg2 */
+#define CCS_SEMAPHORE_PPHWSP_OFFSET 0x540
+static u32 ccs_semaphore_offset(struct i915_request *rq)
+{
+ return i915_ggtt_offset(rq->context->state) +
+ (LRC_PPHWSP_PN * PAGE_SIZE) + CCS_SEMAPHORE_PPHWSP_OFFSET;
+}
+
+/* Wa_14014475959:dg2 */
+static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs)
+{
+ int i;
+
+ *cs++ = MI_ATOMIC_INLINE | MI_ATOMIC_GLOBAL_GTT | MI_ATOMIC_CS_STALL |
+ MI_ATOMIC_MOVE;
+ *cs++ = ccs_semaphore_offset(rq);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ /*
+ * When MI_ATOMIC_INLINE_DATA is set, this command must be 11 DW + (1 NOP)
+ * to align. 4 DWs above + 8 filler DWs here.
+ */
+ for (i = 0; i < 8; ++i)
+ *cs++ = 0;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = ccs_semaphore_offset(rq);
+ *cs++ = 0;
+
+ return cs;
+}
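The alignment rule in the comment above can be checked mechanically: 12 dwords total for the inline atomic, 4 emitted explicitly, so 8 fillers. A trivial sketch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const int atomic_len = 12, emitted = 4; /* 11 DW + 1 NOP, per above */
        uint32_t cs[16] = { 0 };
        int i, n = emitted;

        for (i = 0; i < atomic_len - emitted; i++)
                cs[n++] = 0;    /* filler dword */
        assert(n == atomic_len);
        printf("%d filler dwords\n", atomic_len - emitted);
        return 0;
}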
+
static __always_inline u32*
gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
{
@@ -611,6 +630,10 @@ gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
!intel_uc_uses_guc_submission(&rq->engine->gt->uc))
cs = gen12_emit_preempt_busywait(rq, cs);
+ /* Wa_14014475959:dg2 */
+ if (intel_engine_uses_wa_hold_ccs_switchout(rq->engine))
+ cs = ccs_emit_wa_busywait(rq, cs);
+
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
index cc6e21d3662a..107ab42539ab 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
@@ -10,7 +10,7 @@
#include <linux/types.h>
#include "i915_gem.h" /* GEM_BUG_ON */
-
+#include "intel_gt_regs.h"
#include "intel_gpu_commands.h"
struct i915_request;
@@ -38,6 +38,8 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
+u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg);
+
static inline u32 *
__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
{
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index f574da00eff1..c7bd5d71b03e 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -454,11 +454,11 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
pd = pdp->entry[gen8_pd_index(idx, 2)];
}
- clflush_cache_range(vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
}
} while (1);
- clflush_cache_range(vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
return idx;
}
@@ -631,7 +631,7 @@ static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
}
} while (rem >= page_size && index < I915_PDES);
- clflush_cache_range(vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
/*
* Is it safe to mark the 2M block as 64K? -- Either we have
@@ -647,7 +647,7 @@ static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
I915_GTT_PAGE_SIZE_2M)))) {
vaddr = px_vaddr(pd);
vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
- clflush_cache_range(vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
page_size = I915_GTT_PAGE_SIZE_64K;
/*
@@ -668,7 +668,7 @@ static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
for (i = 1; i < index; i += 16)
memset64(vaddr + i, encode, 15);
- clflush_cache_range(vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
}
}
@@ -722,7 +722,7 @@ static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
vaddr = px_vaddr(pt);
vaddr[gen8_pd_index(idx, 0)] = gen8_pte_encode(addr, level, flags);
- clflush_cache_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
+ drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
}
static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 5d0ec7c49b6a..4070cb5711d8 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -386,7 +386,7 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
ce->ring = NULL;
ce->ring_size = SZ_4K;
- ewma_runtime_init(&ce->runtime.avg);
+ ewma_runtime_init(&ce->stats.runtime.avg);
ce->vm = i915_vm_get(engine->gt->vm);
@@ -400,7 +400,7 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
INIT_LIST_HEAD(&ce->guc_state.fences);
INIT_LIST_HEAD(&ce->guc_state.requests);
- ce->guc_id.id = GUC_INVALID_LRC_ID;
+ ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
INIT_LIST_HEAD(&ce->guc_id.link);
INIT_LIST_HEAD(&ce->destroyed_link);
@@ -576,6 +576,31 @@ void intel_context_bind_parent_child(struct intel_context *parent,
child->parallel.parent = parent;
}
+u64 intel_context_get_total_runtime_ns(const struct intel_context *ce)
+{
+ u64 total, active;
+
+ total = ce->stats.runtime.total;
+ if (ce->ops->flags & COPS_RUNTIME_CYCLES)
+ total *= ce->engine->gt->clock_period_ns;
+
+ active = READ_ONCE(ce->stats.active);
+ if (active)
+ active = intel_context_clock() - active;
+
+ return total + active;
+}
+
+u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
+{
+ u64 avg = ewma_runtime_read(&ce->stats.runtime.avg);
+
+ if (ce->ops->flags & COPS_RUNTIME_CYCLES)
+ avg *= ce->engine->gt->clock_period_ns;
+
+ return avg;
+}
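A sketch of the conversion these helpers perform, with invented numbers: contexts flagged COPS_RUNTIME_CYCLES store CS timestamp cycles and scale by the GT clock period, and any in-flight time is added from the CPU clock.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t total_cycles = 1200000;        /* hypothetical HW counter */
        uint32_t clock_period_ns = 52;          /* hypothetical ~19.2MHz CS clock */
        uint64_t started_ns = 5000000, now_ns = 5300000;

        /* COPS_RUNTIME_CYCLES: stored value is cycles, scale to ns. */
        uint64_t total = total_cycles * clock_period_ns;
        /* A non-zero start timestamp means the context is still running. */
        uint64_t active = started_ns ? now_ns - started_ns : 0;

        printf("runtime=%llu ns\n", (unsigned long long)(total + active));
        return 0;
}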
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index d8c74bbf9aae..b7d3214d2cdd 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -351,18 +351,13 @@ intel_context_clear_nopreempt(struct intel_context *ce)
clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}
-static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
-{
- const u32 period = ce->engine->gt->clock_period_ns;
-
- return READ_ONCE(ce->runtime.total) * period;
-}
+u64 intel_context_get_total_runtime_ns(const struct intel_context *ce);
+u64 intel_context_get_avg_runtime_ns(struct intel_context *ce);
-static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
+static inline u64 intel_context_clock(void)
{
- const u32 period = ce->engine->gt->clock_period_ns;
-
- return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
+ /* As we mix CS cycles with CPU clocks, use the raw monotonic clock. */
+ return ktime_get_raw_fast_ns();
}
#endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 30cd81ad8911..09f82545789f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -35,6 +35,9 @@ struct intel_context_ops {
#define COPS_HAS_INFLIGHT_BIT 0
#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)
+#define COPS_RUNTIME_CYCLES_BIT 1
+#define COPS_RUNTIME_CYCLES BIT(COPS_RUNTIME_CYCLES_BIT)
+
int (*alloc)(struct intel_context *ce);
void (*ban)(struct intel_context *ce, struct i915_request *rq);
@@ -134,14 +137,19 @@ struct intel_context {
} lrc;
u32 tag; /* cookie passed to HW to track this context on submission */
- /* Time on GPU as tracked by the hw. */
- struct {
- struct ewma_runtime avg;
- u64 total;
- u32 last;
- I915_SELFTEST_DECLARE(u32 num_underflow);
- I915_SELFTEST_DECLARE(u32 max_underflow);
- } runtime;
+ /** stats: Context GPU engine busyness tracking. */
+ struct intel_context_stats {
+ u64 active;
+
+ /* Time on GPU as tracked by the hw. */
+ struct {
+ struct ewma_runtime avg;
+ u64 total;
+ u32 last;
+ I915_SELFTEST_DECLARE(u32 num_underflow);
+ I915_SELFTEST_DECLARE(u32 max_underflow);
+ } runtime;
+ } stats;
unsigned int active_count; /* protected by timeline->mutex */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 1c0ab05c3c40..1431f1e9dbee 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -4,6 +4,7 @@
#include <asm/cacheflush.h>
#include <drm/drm_util.h>
+#include <drm/drm_cache.h>
#include <linux/hashtable.h>
#include <linux/irq_work.h>
@@ -143,15 +144,9 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
* of extra paranoia to try and ensure that the HWS takes the value
* we give and that it doesn't end up trapped inside the CPU!
*/
- if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
- mb();
- clflush(&engine->status_page.addr[reg]);
- engine->status_page.addr[reg] = value;
- clflush(&engine->status_page.addr[reg]);
- mb();
- } else {
- WRITE_ONCE(engine->status_page.addr[reg], value);
- }
+ drm_clflush_virt_range(&engine->status_page.addr[reg], sizeof(value));
+ WRITE_ONCE(engine->status_page.addr[reg], value);
+ drm_clflush_virt_range(&engine->status_page.addr[reg], sizeof(value));
}
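The rewritten helper uses a flush/write/flush pattern so the status-page value reaches memory without depending on per-CPU CLFLUSH feature checks; a sketch with the cache flush stubbed out (drm_clflush_virt_range() does the real work in the kernel):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void clflush_stub(volatile void *addr, size_t len)
{
        (void)addr;
        (void)len;      /* the platform cache flush would go here */
}

static void write_status(volatile uint32_t *page, int reg, uint32_t value)
{
        clflush_stub(&page[reg], sizeof(value));        /* drop stale line */
        page[reg] = value;                              /* WRITE_ONCE() in the kernel */
        clflush_stub(&page[reg], sizeof(value));        /* push it to memory */
}

int main(void)
{
        static volatile uint32_t hws[1024];

        write_status(hws, 0x30, 0xdeadbeef);
        printf("hws[0x30]=%#x\n", (unsigned)hws[0x30]);
        return 0;
}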
/*
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 7447411a5b26..14c6ddbbfde8 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -436,6 +436,11 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
if (GRAPHICS_VER(i915) == 12 && engine->class == RENDER_CLASS)
engine->props.preempt_timeout_ms = 0;
+ if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) &&
+ __ffs(CCS_MASK(engine->gt)) == engine->instance) ||
+ engine->class == RENDER_CLASS)
+ engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;
+
/* features common between engines sharing EUs */
if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) {
engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE;
@@ -726,12 +731,24 @@ static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids,
static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class)
{
- int i;
- u8 map[MAX_ENGINE_INSTANCE + 1];
+ /*
+ * Logical-to-physical mapping is needed to properly support
+ * the split-frame feature.
+ */
+ if (MEDIA_VER(gt->i915) >= 11 && class == VIDEO_DECODE_CLASS) {
+ const u8 map[] = { 0, 2, 4, 6, 1, 3, 5, 7 };
+
+ populate_logical_ids(gt, logical_ids, class,
+ map, ARRAY_SIZE(map));
+ } else {
+ int i;
+ u8 map[MAX_ENGINE_INSTANCE + 1];
- for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i)
- map[i] = i;
- populate_logical_ids(gt, logical_ids, class, map, ARRAY_SIZE(map));
+ for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i)
+ map[i] = i;
+ populate_logical_ids(gt, logical_ids, class,
+ map, ARRAY_SIZE(map));
+ }
}
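A sketch of how the fixed map above interleaves logical ids over the present video-decode engines; the engine-present mask here is assumed, and populate_logical_ids() is reduced to a loop:

#include <stdio.h>

int main(void)
{
        const unsigned char map[] = { 0, 2, 4, 6, 1, 3, 5, 7 };
        unsigned int present = 0x0f;    /* assumed: VCS0-3 exist */
        int logical = 0;
        size_t i;

        /* Assign logical ids in map order, skipping absent instances:
         * even physical instances get the low logical ids first. */
        for (i = 0; i < sizeof(map); i++)
                if (present & (1u << map[i]))
                        printf("physical vcs%d -> logical %d\n",
                               map[i], logical++);
        return 0;
}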
/**
@@ -1263,6 +1280,15 @@ static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
int err;
intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
+
+ /*
+ * Wa_22011802037: gen12. Prior to doing a reset, ensure CS is
+ * stopped; set the ring stop bit and prefetch disable bit to halt CS.
+ */
+ if (GRAPHICS_VER(engine->i915) == 12)
+ intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
+ _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
+
err = __intel_wait_for_register_fw(engine->uncore, mode,
MODE_IDLE, MODE_IDLE,
fast_timeout_us,
@@ -1697,9 +1723,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
- if (intel_engine_uses_guc(engine)) {
- /* nothing to print yet */
- } else if (HAS_EXECLISTS(dev_priv)) {
+ if (HAS_EXECLISTS(dev_priv) && !intel_engine_uses_guc(engine)) {
struct i915_request * const *port, *rq;
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index 0bf8b45c9319..594a629cb28f 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -181,6 +181,7 @@
#define GFX_SURFACE_FAULT_ENABLE (1 << 12)
#define GFX_REPLAY_MODE (1 << 11)
#define GFX_PSMI_GRANULARITY (1 << 10)
+#define GEN12_GFX_PREFETCH_DISABLE REG_BIT(10)
#define GFX_PPGTT_ENABLE (1 << 9)
#define GEN8_GFX_PPGTT_48B (1 << 7)
#define GFX_FORWARD_VBLANK_MASK (3 << 5)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 19ff8758e34d..298f2cc7a879 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -96,7 +96,9 @@ struct i915_ctx_workarounds {
#define I915_MAX_VCS 8
#define I915_MAX_VECS 4
+#define I915_MAX_SFC (I915_MAX_VCS / 2)
#define I915_MAX_CCS 4
+#define I915_MAX_RCS 1
/*
* Engine IDs definitions.
@@ -526,6 +528,8 @@ struct intel_engine_cs {
#define I915_ENGINE_WANT_FORCED_PREEMPTION BIT(8)
#define I915_ENGINE_HAS_RCS_REG_STATE BIT(9)
#define I915_ENGINE_HAS_EU_PRIORITY BIT(10)
+#define I915_ENGINE_FIRST_RENDER_COMPUTE BIT(11)
+#define I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT BIT(12)
unsigned int flags;
/*
@@ -626,6 +630,13 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO;
}
+/* Wa_14014475959:dg2 */
+static inline bool
+intel_engine_uses_wa_hold_ccs_switchout(struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+}
+
#define instdone_has_slice(dev_priv___, sseu___, slice___) \
((GRAPHICS_VER(dev_priv___) == 7 ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))
@@ -643,7 +654,7 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
#define for_each_instdone_gslice_dss_xehp(dev_priv_, sseu_, iter_, gslice_, dss_) \
for ((iter_) = 0, (gslice_) = 0, (dss_) = 0; \
- (iter_) < GEN_MAX_SUBSLICES; \
+ (iter_) < GEN_SS_MASK_SIZE; \
(iter_)++, (gslice_) = (iter_) / GEN_DSS_PER_GSLICE, \
(dss_) = (iter_) % GEN_DSS_PER_GSLICE) \
for_each_if(intel_sseu_has_subslice((sseu_), 0, (iter_)))
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index b8c9b6b89003..0f6cd96b459f 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -193,7 +193,6 @@ static void add_legacy_ring(struct legacy_ring *ring,
void intel_engines_driver_register(struct drm_i915_private *i915)
{
struct legacy_ring ring = {};
- u8 uabi_instances[5] = {};
struct list_head *it, *next;
struct rb_node **p, *prev;
LIST_HEAD(engines);
@@ -214,8 +213,10 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
engine->uabi_class = uabi_classes[engine->class];
- GEM_BUG_ON(engine->uabi_class >= ARRAY_SIZE(uabi_instances));
- engine->uabi_instance = uabi_instances[engine->uabi_class]++;
+ GEM_BUG_ON(engine->uabi_class >=
+ ARRAY_SIZE(i915->engine_uabi_class_count));
+ engine->uabi_instance =
+ i915->engine_uabi_class_count[engine->uabi_class]++;
/* Replace the internal name with the final user facing name */
memcpy(old, engine->name, sizeof(engine->name));
@@ -245,8 +246,8 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
int class, inst;
int errors = 0;
- for (class = 0; class < ARRAY_SIZE(uabi_instances); class++) {
- for (inst = 0; inst < uabi_instances[class]; inst++) {
+ for (class = 0; class < ARRAY_SIZE(i915->engine_uabi_class_count); class++) {
+ for (inst = 0; inst < i915->engine_uabi_class_count[class]; inst++) {
engine = intel_engine_lookup_user(i915,
class, inst);
if (!engine) {
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 1c602d4ae297..f8749c433b7c 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -625,8 +625,6 @@ static void __execlists_schedule_out(struct i915_request * const rq,
GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
__set_bit(ccid - 1, &engine->context_tag);
}
-
- lrc_update_runtime(ce);
intel_engine_context_out(engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
if (engine->fw_domain && !--engine->fw_active)
@@ -1651,12 +1649,6 @@ cancel_port_requests(struct intel_engine_execlists * const execlists,
return inactive;
}
-static void invalidate_csb_entries(const u64 *first, const u64 *last)
-{
- clflush((void *)first);
- clflush((void *)last);
-}
-
/*
* Starting with Gen12, the status has a new format:
*
@@ -2004,15 +1996,30 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
* the wash as hardware, working or not, will need to do the
* invalidation before.
*/
- invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
+ drm_clflush_virt_range(&buf[0], num_entries * sizeof(buf[0]));
/*
* We assume that any event reflects a change in context flow
* and merits a fresh timeslice. We reinstall the timer after
* inspecting the queue to see if we need to resubmit.
*/
- if (*prev != *execlists->active) /* elide lite-restores */
+ if (*prev != *execlists->active) { /* elide lite-restores */
+ /*
+ * Note the inherent discrepancy between the HW runtime,
+ * recorded as part of the context switch, and the CPU
+ * adjustment for active contexts. We have to hope that
+ * the delay in processing the CS event is very small
+ * and consistent. It works to our advantage to have
+ * the CPU adjustment _undershoot_ (i.e. start later than)
+ * the CS timestamp so we never overreport the runtime
+ * and correct ourselves later when updating from HW.
+ */
+ if (*prev)
+ lrc_runtime_stop((*prev)->context);
+ if (*execlists->active)
+ lrc_runtime_start((*execlists->active)->context);
new_timeslice(execlists);
+ }
return inactive;
}
@@ -2236,11 +2243,11 @@ static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
if (!cap->error)
goto err_cap;
- cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
+ cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp, CORE_DUMP_FLAG_NONE);
if (!cap->error->gt)
goto err_gpu;
- cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
+ cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp, CORE_DUMP_FLAG_NONE);
if (!cap->error->gt->engine)
goto err_gt;
@@ -2644,7 +2651,7 @@ unwind:
}
static const struct intel_context_ops execlists_context_ops = {
- .flags = COPS_HAS_INFLIGHT,
+ .flags = COPS_HAS_INFLIGHT | COPS_RUNTIME_CYCLES,
.alloc = execlists_context_alloc,
@@ -2788,8 +2795,9 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
/* Check that the GPU does indeed update the CSB entries! */
memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
- invalidate_csb_entries(&execlists->csb_status[0],
- &execlists->csb_status[reset_value]);
+ drm_clflush_virt_range(execlists->csb_status,
+ execlists->csb_size *
+ sizeof(execlists->csb_status));
/* Once more for luck and our trusty paranoia */
ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
@@ -2833,7 +2841,7 @@ static void execlists_sanitize(struct intel_engine_cs *engine)
sanitize_hwsp(engine);
/* And scrub the dirty cachelines for the HWSP */
- clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+ drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
intel_engine_reset_pinned_contexts(engine);
}
@@ -2912,7 +2920,7 @@ static int execlists_resume(struct intel_engine_cs *engine)
enable_execlists(engine);
- if (engine->class == RENDER_CLASS)
+ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
xehp_enable_ccs_engines(engine);
return 0;
@@ -2958,9 +2966,8 @@ reset_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- mb(); /* paranoia: read the CSB pointers from after the reset */
- clflush(execlists->csb_write);
- mb();
+ drm_clflush_virt_range(execlists->csb_write,
+ sizeof(execlists->csb_write[0]));
inactive = process_csb(engine, inactive); /* drain preemption events */
@@ -3702,7 +3709,7 @@ virtual_get_sibling(struct intel_engine_cs *engine, unsigned int sibling)
}
static const struct intel_context_ops virtual_context_ops = {
- .flags = COPS_HAS_INFLIGHT,
+ .flags = COPS_HAS_INFLIGHT | COPS_RUNTIME_CYCLES,
.alloc = virtual_context_alloc,
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index e164f30a900d..e6b2eb122ad7 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -3,18 +3,16 @@
* Copyright © 2020 Intel Corporation
*/
-#include <linux/agp_backend.h>
-#include <linux/stop_machine.h>
-
+#include <linux/types.h>
#include <asm/set_memory.h>
#include <asm/smp.h>
#include <drm/i915_drm.h>
-#include <drm/intel-gtt.h>
#include "gem/i915_gem_lmem.h"
#include "intel_gt.h"
+#include "intel_gt_gmch.h"
#include "intel_gt_regs.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
@@ -95,28 +93,6 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915)
return 0;
}
-/*
- * Certain Gen5 chipsets require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
-static bool needs_idle_maps(struct drm_i915_private *i915)
-{
- /*
- * Query intel_iommu to see if we need the workaround. Presumably that
- * was loaded first.
- */
- if (!i915_vtd_active(i915))
- return false;
-
- if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
- return true;
-
- if (GRAPHICS_VER(i915) == 12)
- return true; /* XXX DMAR fault reason 7 */
-
- return false;
-}
-
/**
* i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
* @vm: The VM to suspend the mappings for
@@ -127,7 +103,7 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
void i915_ggtt_suspend_vm(struct i915_address_space *vm)
{
struct i915_vma *vma, *vn;
- int open;
+ int save_skip_rewrite;
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
@@ -136,8 +112,12 @@ retry:
mutex_lock(&vm->mutex);
- /* Skip rewriting PTE on VMA unbind. */
- open = atomic_xchg(&vm->open, 0);
+ /*
+ * Skip rewriting PTE on VMA unbind.
+ * FIXME: Use an argument to i915_vma_unbind() instead?
+ */
+ save_skip_rewrite = vm->skip_pte_rewrite;
+ vm->skip_pte_rewrite = true;
list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
@@ -155,16 +135,14 @@ retry:
*/
i915_gem_object_get(obj);
- atomic_set(&vm->open, open);
mutex_unlock(&vm->mutex);
i915_gem_object_lock(obj, NULL);
- open = i915_vma_unbind(vma);
+ GEM_WARN_ON(i915_vma_unbind(vma));
i915_gem_object_unlock(obj);
-
- GEM_WARN_ON(open);
-
i915_gem_object_put(obj);
+
+ vm->skip_pte_rewrite = save_skip_rewrite;
goto retry;
}
@@ -180,7 +158,7 @@ retry:
vm->clear_range(vm, 0, vm->total);
- atomic_set(&vm->open, open);
+ vm->skip_pte_rewrite = save_skip_rewrite;
mutex_unlock(&vm->mutex);
}
@@ -203,7 +181,7 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
spin_unlock_irq(&uncore->lock);
}
-static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
+void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
@@ -228,11 +206,6 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
-static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
-{
- intel_gtt_chipset_flush();
-}
-
u64 gen8_ggtt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
@@ -245,258 +218,7 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
return pte;
}
-static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
-{
- writeq(pte, addr);
-}
-
-static void gen8_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen8_pte_t __iomem *pte =
- (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
-
- gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));
-
- ggtt->invalidate(ggtt);
-}
-
-static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level level,
- u32 flags)
-{
- const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen8_pte_t __iomem *gte;
- gen8_pte_t __iomem *end;
- struct sgt_iter iter;
- dma_addr_t addr;
-
- /*
- * Note that we ignore PTE_READ_ONLY here. The caller must be careful
- * not to allow the user to override access to a read only page.
- */
-
- gte = (gen8_pte_t __iomem *)ggtt->gsm;
- gte += vma_res->start / I915_GTT_PAGE_SIZE;
- end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
-
- for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
- gen8_set_pte(gte++, pte_encode | addr);
- GEM_BUG_ON(gte > end);
-
- /* Fill the allocated but "unused" space beyond the end of the buffer */
- while (gte < end)
- gen8_set_pte(gte++, vm->scratch[0]->encode);
-
- /*
- * We want to flush the TLBs only after we're certain all the PTE
- * updates have finished.
- */
- ggtt->invalidate(ggtt);
-}
-
-static void gen6_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *pte =
- (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
-
- iowrite32(vm->pte_encode(addr, level, flags), pte);
-
- ggtt->invalidate(ggtt);
-}
-
-/*
- * Binds an object into the global gtt with the specified cache level.
- * The object will be accessible to the GPU via commands whose operands
- * reference offsets within the global GTT as well as accessible by the GPU
- * through the GMADR mapped BAR (i915->mm.gtt->gtt).
- */
-static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *gte;
- gen6_pte_t __iomem *end;
- struct sgt_iter iter;
- dma_addr_t addr;
-
- gte = (gen6_pte_t __iomem *)ggtt->gsm;
- gte += vma_res->start / I915_GTT_PAGE_SIZE;
- end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
-
- for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
- iowrite32(vm->pte_encode(addr, level, flags), gte++);
- GEM_BUG_ON(gte > end);
-
- /* Fill the allocated but "unused" space beyond the end of the buffer */
- while (gte < end)
- iowrite32(vm->scratch[0]->encode, gte++);
-
- /*
- * We want to flush the TLBs only after we're certain all the PTE
- * updates have finished.
- */
- ggtt->invalidate(ggtt);
-}
-
-static void nop_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
-}
-
-static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
- const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
- gen8_pte_t __iomem *gtt_base =
- (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- for (i = 0; i < num_entries; i++)
- gen8_set_pte(&gtt_base[i], scratch_pte);
-}
-
-static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
-{
- /*
- * Make sure the internal GAM fifo has been cleared of all GTT
- * writes before exiting stop_machine(). This guarantees that
- * any aperture accesses waiting to start in another process
- * cannot back up behind the GTT writes causing a hang.
- * The register can be any arbitrary GAM register.
- */
- intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
-}
-
-struct insert_page {
- struct i915_address_space *vm;
- dma_addr_t addr;
- u64 offset;
- enum i915_cache_level level;
-};
-
-static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
-{
- struct insert_page *arg = _arg;
-
- gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 unused)
-{
- struct insert_page arg = { vm, addr, offset, level };
-
- stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
-}
-
-struct insert_entries {
- struct i915_address_space *vm;
- struct i915_vma_resource *vma_res;
- enum i915_cache_level level;
- u32 flags;
-};
-
-static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
-{
- struct insert_entries *arg = _arg;
-
- gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level level,
- u32 flags)
-{
- struct insert_entries arg = { vm, vma_res, level, flags };
-
- stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
-}
-
-static void gen6_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
- gen6_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- scratch_pte = vm->scratch[0]->encode;
- for (i = 0; i < num_entries; i++)
- iowrite32(scratch_pte, &gtt_base[i]);
-}
-
-static void i915_ggtt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
- intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
-}
-
-static void i915_ggtt_insert_entries(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- unsigned int flags = (cache_level == I915_CACHE_NONE) ?
- AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-
- intel_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
- flags);
-}
-
-static void i915_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
-}
-
-static void ggtt_bind_vma(struct i915_address_space *vm,
+void intel_ggtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma_resource *vma_res,
enum i915_cache_level cache_level,
@@ -520,7 +242,7 @@ static void ggtt_bind_vma(struct i915_address_space *vm,
vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}
-static void ggtt_unbind_vma(struct i915_address_space *vm,
+void intel_ggtt_unbind_vma(struct i915_address_space *vm,
struct i915_vma_resource *vma_res)
{
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
@@ -723,10 +445,10 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
ggtt->alias = ppgtt;
ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
- GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
+ GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma);
ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
- GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
+ GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma);
ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
@@ -749,8 +471,8 @@ static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
i915_vm_put(&ppgtt->vm);
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
}
int i915_init_ggtt(struct drm_i915_private *i915)
@@ -774,13 +496,13 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
struct i915_vma *vma, *vn;
- atomic_set(&ggtt->vm.open, 0);
-
flush_workqueue(ggtt->vm.i915->wq);
i915_gem_drain_freed_objects(ggtt->vm.i915);
mutex_lock(&ggtt->vm.mutex);
+ ggtt->vm.skip_pte_rewrite = true;
+
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
bool trylock;
@@ -838,364 +560,12 @@ void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
dma_resv_fini(&ggtt->vm._resv);
}
-static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
-{
- snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
- snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
- return snb_gmch_ctl << 20;
-}
-
-static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
-{
- bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
- bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
- if (bdw_gmch_ctl)
- bdw_gmch_ctl = 1 << bdw_gmch_ctl;
-
-#ifdef CONFIG_X86_32
- /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
- if (bdw_gmch_ctl > 4)
- bdw_gmch_ctl = 4;
-#endif
-
- return bdw_gmch_ctl << 20;
-}
-
-static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
-{
- gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
- gmch_ctrl &= SNB_GMCH_GGMS_MASK;
-
- if (gmch_ctrl)
- return 1 << (20 + gmch_ctrl);
-
- return 0;
-}
-
-static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
-{
- /*
- * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
- * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
- */
- GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
- return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
-}
-
-static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
-{
- return gen6_gttmmadr_size(i915) / 2;
-}
-
-static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- phys_addr_t phys_addr;
- u32 pte_flags;
- int ret;
-
- GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
- phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);
-
- /*
- * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
- * will be dropped. For WC mappings in general we have 64 byte burst
- * writes when the WC buffer is flushed, so we can't use it, but have to
- * resort to an uncached mapping. The WC issue is easily caught by the
- * readback check when writing GTT PTE entries.
- */
- if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
- ggtt->gsm = ioremap(phys_addr, size);
- else
- ggtt->gsm = ioremap_wc(phys_addr, size);
- if (!ggtt->gsm) {
- drm_err(&i915->drm, "Failed to map the ggtt page table\n");
- return -ENOMEM;
- }
-
- kref_init(&ggtt->vm.resv_ref);
- ret = setup_scratch_page(&ggtt->vm);
- if (ret) {
- drm_err(&i915->drm, "Scratch setup failed\n");
- /* iounmap will also get called at remove, but meh */
- iounmap(ggtt->gsm);
- return ret;
- }
-
- pte_flags = 0;
- if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
- pte_flags |= PTE_LM;
-
- ggtt->vm.scratch[0]->encode =
- ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
- I915_CACHE_NONE, pte_flags);
-
- return 0;
-}
-
-static void gen6_gmch_remove(struct i915_address_space *vm)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-
- iounmap(ggtt->gsm);
- free_scratch(vm);
-}
-
-static struct resource pci_resource(struct pci_dev *pdev, int bar)
+struct resource intel_pci_resource(struct pci_dev *pdev, int bar)
{
return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar));
}
-static int gen8_gmch_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- unsigned int size;
- u16 snb_gmch_ctl;
-
- /* TODO: We're not aware of mappable constraints on gen8 yet */
- if (!HAS_LMEM(i915)) {
- ggtt->gmadr = pci_resource(pdev, 2);
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
- }
-
- pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- if (IS_CHERRYVIEW(i915))
- size = chv_get_total_gtt_size(snb_gmch_ctl);
- else
- size = gen8_get_total_gtt_size(snb_gmch_ctl);
-
- ggtt->vm.alloc_pt_dma = alloc_pt_dma;
- ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
- ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
-
- ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
- ggtt->vm.cleanup = gen6_gmch_remove;
- ggtt->vm.insert_page = gen8_ggtt_insert_page;
- ggtt->vm.clear_range = nop_clear_range;
- if (intel_scanout_needs_vtd_wa(i915))
- ggtt->vm.clear_range = gen8_ggtt_clear_range;
-
- ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
-
- /*
- * Serialize GTT updates with aperture access on BXT if VT-d is on,
- * and always on CHV.
- */
- if (intel_vm_no_concurrent_access_wa(i915)) {
- ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
- ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
- ggtt->vm.bind_async_flags =
- I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
- }
-
- ggtt->invalidate = gen8_ggtt_invalidate;
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
-
- ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
-
- setup_private_pat(ggtt->vm.gt->uncore);
-
- return ggtt_probe_common(ggtt, size);
-}
-
-static u64 snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- switch (level) {
- case I915_CACHE_L3_LLC:
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- MISSING_CASE(level);
- }
-
- return pte;
-}
-
-static u64 ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- switch (level) {
- case I915_CACHE_L3_LLC:
- pte |= GEN7_PTE_CACHE_L3_LLC;
- break;
- case I915_CACHE_LLC:
- pte |= GEN6_PTE_CACHE_LLC;
- break;
- case I915_CACHE_NONE:
- pte |= GEN6_PTE_UNCACHED;
- break;
- default:
- MISSING_CASE(level);
- }
-
- return pte;
-}
-
-static u64 byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- if (!(flags & PTE_READ_ONLY))
- pte |= BYT_PTE_WRITEABLE;
-
- if (level != I915_CACHE_NONE)
- pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
-
- return pte;
-}
-
-static u64 hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- if (level != I915_CACHE_NONE)
- pte |= HSW_WB_LLC_AGE3;
-
- return pte;
-}
-
-static u64 iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
-
- switch (level) {
- case I915_CACHE_NONE:
- break;
- case I915_CACHE_WT:
- pte |= HSW_WT_ELLC_LLC_AGE3;
- break;
- default:
- pte |= HSW_WB_ELLC_LLC_AGE3;
- break;
- }
-
- return pte;
-}
-
-static int gen6_gmch_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- unsigned int size;
- u16 snb_gmch_ctl;
-
- ggtt->gmadr = pci_resource(pdev, 2);
- ggtt->mappable_end = resource_size(&ggtt->gmadr);
-
- /*
- * 64/512MB is the current min/max we actually know of, but this is
- * just a coarse sanity check.
- */
- if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
- drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
- &ggtt->mappable_end);
- return -ENXIO;
- }
-
- pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-
- size = gen6_get_total_gtt_size(snb_gmch_ctl);
- ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
-
- ggtt->vm.alloc_pt_dma = alloc_pt_dma;
- ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
-
- ggtt->vm.clear_range = nop_clear_range;
- if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
- ggtt->vm.clear_range = gen6_ggtt_clear_range;
- ggtt->vm.insert_page = gen6_ggtt_insert_page;
- ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
- ggtt->vm.cleanup = gen6_gmch_remove;
-
- ggtt->invalidate = gen6_ggtt_invalidate;
-
- if (HAS_EDRAM(i915))
- ggtt->vm.pte_encode = iris_pte_encode;
- else if (IS_HASWELL(i915))
- ggtt->vm.pte_encode = hsw_pte_encode;
- else if (IS_VALLEYVIEW(i915))
- ggtt->vm.pte_encode = byt_pte_encode;
- else if (GRAPHICS_VER(i915) >= 7)
- ggtt->vm.pte_encode = ivb_pte_encode;
- else
- ggtt->vm.pte_encode = snb_pte_encode;
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
-
- return ggtt_probe_common(ggtt, size);
-}
-
-static void i915_gmch_remove(struct i915_address_space *vm)
-{
- intel_gmch_remove();
-}
-
-static int i915_gmch_probe(struct i915_ggtt *ggtt)
-{
- struct drm_i915_private *i915 = ggtt->vm.i915;
- phys_addr_t gmadr_base;
- int ret;
-
- ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
- if (!ret) {
- drm_err(&i915->drm, "failed to set up gmch\n");
- return -EIO;
- }
-
- intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
-
- ggtt->gmadr =
- (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
-
- ggtt->vm.alloc_pt_dma = alloc_pt_dma;
- ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
-
- if (needs_idle_maps(i915)) {
- drm_notice(&i915->drm,
- "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
- ggtt->do_idle_maps = true;
- }
-
- ggtt->vm.insert_page = i915_ggtt_insert_page;
- ggtt->vm.insert_entries = i915_ggtt_insert_entries;
- ggtt->vm.clear_range = i915_ggtt_clear_range;
- ggtt->vm.cleanup = i915_gmch_remove;
-
- ggtt->invalidate = gmch_ggtt_invalidate;
-
- ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
- ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
-
- if (unlikely(ggtt->do_idle_maps))
- drm_notice(&i915->drm,
- "Applying Ironlake quirks for intel_iommu\n");
-
- return 0;
-}
-
static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
@@ -1207,11 +577,11 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
dma_resv_init(&ggtt->vm._resv);
if (GRAPHICS_VER(i915) <= 5)
- ret = i915_gmch_probe(ggtt);
+ ret = intel_gt_gmch_gen5_probe(ggtt);
else if (GRAPHICS_VER(i915) < 8)
- ret = gen6_gmch_probe(ggtt);
+ ret = intel_gt_gmch_gen6_probe(ggtt);
else
- ret = gen8_gmch_probe(ggtt);
+ ret = intel_gt_gmch_gen8_probe(ggtt);
if (ret) {
dma_resv_fini(&ggtt->vm._resv);
return ret;
@@ -1265,10 +635,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
- if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt())
- return -EIO;
-
- return 0;
+ return intel_gt_gmch_gen5_enable_hw(i915);
}
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
@@ -1308,16 +675,12 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
{
struct i915_vma *vma;
bool write_domain_objs = false;
- int open;
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
/* First fill our portion of the GTT with scratch pages */
vm->clear_range(vm, 0, vm->total);
- /* Skip rewriting PTE on VMA unbind. */
- open = atomic_xchg(&vm->open, 0);
-
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry(vma, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
@@ -1334,8 +697,6 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
}
}
- atomic_set(&vm->open, open);
-
return write_domain_objs;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index d112ffd56418..e52718a87f14 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -134,6 +134,13 @@
#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
#define MI_USE_GGTT (1 << 22) /* g4x+ */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
+#define MI_ATOMIC MI_INSTR(0x2f, 1)
+#define MI_ATOMIC_INLINE (MI_INSTR(0x2f, 9) | MI_ATOMIC_INLINE_DATA)
+#define MI_ATOMIC_GLOBAL_GTT (1 << 22)
+#define MI_ATOMIC_INLINE_DATA (1 << 18)
+#define MI_ATOMIC_CS_STALL (1 << 17)
+#define MI_ATOMIC_MOVE (0x4 << 8)
+
/*
* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
* - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
@@ -144,6 +151,7 @@
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
/* Gen11+. addr = base + (ctx_restore ? offset & GENMASK(12,2) : offset) */
#define MI_LRI_LRM_CS_MMIO REG_BIT(19)
+#define MI_LRI_MMIO_REMAP_EN REG_BIT(17)
#define MI_LRI_FORCE_POSTED (1<<12)
#define MI_LOAD_REGISTER_IMM_MAX_REGS (126)
#define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1)
@@ -153,8 +161,10 @@
#define MI_FLUSH_DW_PROTECTED_MEM_EN (1 << 22)
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
+#define MI_FLUSH_DW_CCS (1<<16)
#define MI_FLUSH_DW_OP_STOREDW (1<<14)
#define MI_FLUSH_DW_OP_MASK (3<<14)
+#define MI_FLUSH_DW_LLC (1<<9)
#define MI_FLUSH_DW_NOTIFY (1<<8)
#define MI_INVALIDATE_BSD (1<<7)
#define MI_FLUSH_DW_USE_GTT (1<<2)
@@ -203,8 +213,27 @@
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
+#define XY_CTRL_SURF_INSTR_SIZE 5
+#define MI_FLUSH_DW_SIZE 3
+#define XY_CTRL_SURF_COPY_BLT ((2 << 29) | (0x48 << 22) | 3)
+#define SRC_ACCESS_TYPE_SHIFT 21
+#define DST_ACCESS_TYPE_SHIFT 20
+#define CCS_SIZE_MASK 0x3FF
+#define CCS_SIZE_SHIFT 8
+#define XY_CTRL_SURF_MOCS_MASK GENMASK(31, 25)
+#define NUM_CCS_BYTES_PER_BLOCK 256
+#define NUM_BYTES_PER_CCS_BYTE 256
+#define NUM_CCS_BLKS_PER_XFER 1024
+#define INDIRECT_ACCESS 0
+#define DIRECT_ACCESS 1
+
#define COLOR_BLT_CMD (2 << 29 | 0x40 << 22 | (5 - 2))
#define XY_COLOR_BLT_CMD (2 << 29 | 0x50 << 22)
+#define XY_FAST_COLOR_BLT_CMD (2 << 29 | 0x44 << 22)
+#define XY_FAST_COLOR_BLT_DEPTH_32 (2 << 19)
+#define XY_FAST_COLOR_BLT_DW 16
+#define XY_FAST_COLOR_BLT_MOCS_MASK GENMASK(27, 21)
+#define XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT 31
#define SRC_COPY_BLT_CMD (2 << 29 | 0x43 << 22)
#define GEN9_XY_FAST_COPY_BLT_CMD (2 << 29 | 0x42 << 22)
#define XY_SRC_COPY_BLT_CMD (2 << 29 | 0x53 << 22)
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c
new file mode 100644
index 000000000000..0e494028b81d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2019-2022, Intel Corporation. All rights reserved.
+ */
+
+#include <linux/irq.h>
+#include <linux/mei_aux.h>
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "gt/intel_gsc.h"
+#include "gt/intel_gt.h"
+
+#define GSC_BAR_LENGTH 0x00000FFC
+
+static void gsc_irq_mask(struct irq_data *d)
+{
+ /* generic irq handling */
+}
+
+static void gsc_irq_unmask(struct irq_data *d)
+{
+ /* generic irq handling */
+}
+
+static struct irq_chip gsc_irq_chip = {
+ .name = "gsc_irq_chip",
+ .irq_mask = gsc_irq_mask,
+ .irq_unmask = gsc_irq_unmask,
+};
+
+static int gsc_irq_init(int irq)
+{
+ irq_set_chip_and_handler_name(irq, &gsc_irq_chip,
+ handle_simple_irq, "gsc_irq_handler");
+
+ return irq_set_chip_data(irq, NULL);
+}
+
+struct gsc_def {
+ const char *name;
+ unsigned long bar;
+ size_t bar_size;
+};
+
+/* gsc resources and definitions (HECI1 and HECI2) */
+static const struct gsc_def gsc_def_dg1[] = {
+ {
+ /* HECI1 not yet implemented. */
+ },
+ {
+ .name = "mei-gscfi",
+ .bar = DG1_GSC_HECI2_BASE,
+ .bar_size = GSC_BAR_LENGTH,
+ }
+};
+
+static const struct gsc_def gsc_def_dg2[] = {
+ {
+ .name = "mei-gsc",
+ .bar = DG2_GSC_HECI1_BASE,
+ .bar_size = GSC_BAR_LENGTH,
+ },
+ {
+ .name = "mei-gscfi",
+ .bar = DG2_GSC_HECI2_BASE,
+ .bar_size = GSC_BAR_LENGTH,
+ }
+};
+
+static void gsc_release_dev(struct device *dev)
+{
+ struct auxiliary_device *aux_dev = to_auxiliary_dev(dev);
+ struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);
+
+ kfree(adev);
+}
+
+static void gsc_destroy_one(struct intel_gsc_intf *intf)
+{
+ if (intf->adev) {
+ auxiliary_device_delete(&intf->adev->aux_dev);
+ auxiliary_device_uninit(&intf->adev->aux_dev);
+ intf->adev = NULL;
+ }
+ if (intf->irq >= 0)
+ irq_free_desc(intf->irq);
+ intf->irq = -1;
+}
+
+static void gsc_init_one(struct drm_i915_private *i915,
+ struct intel_gsc_intf *intf,
+ unsigned int intf_id)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct mei_aux_device *adev;
+ struct auxiliary_device *aux_dev;
+ const struct gsc_def *def;
+ int ret;
+
+ intf->irq = -1;
+ intf->id = intf_id;
+
+ if (intf_id == 0 && !HAS_HECI_PXP(i915))
+ return;
+
+ if (IS_DG1(i915)) {
+ def = &gsc_def_dg1[intf_id];
+ } else if (IS_DG2(i915)) {
+ def = &gsc_def_dg2[intf_id];
+ } else {
+ drm_warn_once(&i915->drm, "Unknown platform\n");
+ return;
+ }
+
+ if (!def->name) {
+ drm_warn_once(&i915->drm, "HECI%d is not implemented!\n", intf_id + 1);
+ return;
+ }
+
+ intf->irq = irq_alloc_desc(0);
+ if (intf->irq < 0) {
+ drm_err(&i915->drm, "gsc irq error %d\n", intf->irq);
+ return;
+ }
+
+ ret = gsc_irq_init(intf->irq);
+ if (ret < 0) {
+ drm_err(&i915->drm, "gsc irq init failed %d\n", ret);
+ goto fail;
+ }
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ goto fail;
+
+ adev->irq = intf->irq;
+ adev->bar.parent = &pdev->resource[0];
+ adev->bar.start = def->bar + pdev->resource[0].start;
+ adev->bar.end = adev->bar.start + def->bar_size - 1;
+ adev->bar.flags = IORESOURCE_MEM;
+ adev->bar.desc = IORES_DESC_NONE;
+
+ aux_dev = &adev->aux_dev;
+ aux_dev->name = def->name;
+ aux_dev->id = (pci_domain_nr(pdev->bus) << 16) |
+ PCI_DEVID(pdev->bus->number, pdev->devfn);
+ aux_dev->dev.parent = &pdev->dev;
+ aux_dev->dev.release = gsc_release_dev;
+
+ ret = auxiliary_device_init(aux_dev);
+ if (ret < 0) {
+ drm_err(&i915->drm, "gsc aux init failed %d\n", ret);
+ kfree(adev);
+ goto fail;
+ }
+
+ ret = auxiliary_device_add(aux_dev);
+ if (ret < 0) {
+ drm_err(&i915->drm, "gsc aux add failed %d\n", ret);
+ /* adev will be freed with the put_device() and .release sequence */
+ auxiliary_device_uninit(aux_dev);
+ goto fail;
+ }
+ intf->adev = adev;
+
+ return;
+fail:
+ gsc_destroy_one(intf);
+}
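
For reference, the init/add pairing above follows the auxiliary bus contract:
once auxiliary_device_init() succeeds, the .release callback (gsc_release_dev)
owns the adev allocation, which is why the add-failure path calls
auxiliary_device_uninit() instead of kfree(). A minimal sketch of how the
packed aux_dev->id decodes, assuming only the encoding used above:

	/* Illustrative only, not part of the patch. */
	u16 domain = aux_dev->id >> 16;         /* pci_domain_nr(pdev->bus) */
	u8 bus = (aux_dev->id >> 8) & 0xff;     /* PCI_DEVID(): bus number */
	u8 devfn = aux_dev->id & 0xff;          /* PCI_DEVID(): devfn */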
+
+static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
+{
+ int ret;
+
+ if (intf_id >= INTEL_GSC_NUM_INTERFACES) {
+ drm_warn_once(&gt->i915->drm, "GSC irq: intf_id %d is out of range", intf_id);
+ return;
+ }
+
+ if (!HAS_HECI_GSC(gt->i915)) {
+ drm_warn_once(&gt->i915->drm, "GSC irq: not supported");
+ return;
+ }
+
+ if (gt->gsc.intf[intf_id].irq < 0) {
+ drm_err_ratelimited(&gt->i915->drm, "GSC irq: irq not set");
+ return;
+ }
+
+ ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
+ if (ret)
+ drm_err_ratelimited(&gt->i915->drm, "error handling GSC irq: %d\n", ret);
+}
+
+void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir)
+{
+ if (iir & GSC_IRQ_INTF(0))
+ gsc_irq_handler(gt, 0);
+ if (iir & GSC_IRQ_INTF(1))
+ gsc_irq_handler(gt, 1);
+}
+
+void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915)
+{
+ unsigned int i;
+
+ if (!HAS_HECI_GSC(i915))
+ return;
+
+ for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
+ gsc_init_one(i915, &gsc->intf[i], i);
+}
+
+void intel_gsc_fini(struct intel_gsc *gsc)
+{
+ struct intel_gt *gt = gsc_to_gt(gsc);
+ unsigned int i;
+
+ if (!HAS_HECI_GSC(gt->i915))
+ return;
+
+ for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
+ gsc_destroy_one(&gsc->intf[i]);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h
new file mode 100644
index 000000000000..68582f912b21
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2019-2022, Intel Corporation. All rights reserved.
+ */
+#ifndef __INTEL_GSC_DEV_H__
+#define __INTEL_GSC_DEV_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_gt;
+struct mei_aux_device;
+
+#define INTEL_GSC_NUM_INTERFACES 2
+/*
+ * The HECI1 bit corresponds to bit15 and HECI2 to bit14.
+ * The reason for this is to allow growth for more interfaces in the future.
+ */
+#define GSC_IRQ_INTF(_x) BIT(15 - (_x))
+
+/**
+ * struct intel_gsc - graphics security controller
+ * @intf : gsc interface
+ */
+struct intel_gsc {
+ struct intel_gsc_intf {
+ struct mei_aux_device *adev;
+ int irq;
+ unsigned int id;
+ } intf[INTEL_GSC_NUM_INTERFACES];
+};
+
+void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *dev_priv);
+void intel_gsc_fini(struct intel_gsc *gsc);
+void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir);
+
+#endif /* __INTEL_GSC_DEV_H__ */
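
A minimal sketch of the bit layout encoded by GSC_IRQ_INTF(), assuming BIT()
from <linux/bits.h> and static_assert from <linux/build_bug.h>:

	/* Illustrative only: HECI1 sits at bit 15, HECI2 at bit 14. */
	static_assert(GSC_IRQ_INTF(0) == BIT(15));
	static_assert(GSC_IRQ_INTF(1) == BIT(14));

Growing downward from bit 15 leaves the lower bits free for any additional
interfaces added later, as the comment above notes.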
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 8a2483ccbfb9..92394f13b42f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -4,7 +4,6 @@
*/
#include <drm/drm_managed.h>
-#include <drm/intel-gtt.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
@@ -17,6 +16,7 @@
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_debugfs.h"
+#include "intel_gt_gmch.h"
#include "intel_gt_pm.h"
#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
@@ -26,10 +26,11 @@
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
+#include "intel_gt_sysfs.h"
#include "intel_uncore.h"
#include "shmem_utils.h"
-void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
+static void __intel_gt_init_early(struct intel_gt *gt)
{
spin_lock_init(&gt->irq_lock);
@@ -51,17 +52,23 @@ void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
intel_rps_init_early(&gt->rps);
}
-void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
+/* Preliminary initialization of Tile 0 */
+void intel_root_gt_init_early(struct drm_i915_private *i915)
{
+ struct intel_gt *gt = to_gt(i915);
+
gt->i915 = i915;
gt->uncore = &i915->uncore;
+
+ __intel_gt_init_early(gt);
}
-int intel_gt_probe_lmem(struct intel_gt *gt)
+static int intel_gt_probe_lmem(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
+ unsigned int instance = gt->info.id;
+ int id = INTEL_REGION_LMEM_0 + instance;
struct intel_memory_region *mem;
- int id;
int err;
mem = intel_gt_setup_lmem(gt);
@@ -76,9 +83,8 @@ int intel_gt_probe_lmem(struct intel_gt *gt)
return err;
}
- id = INTEL_REGION_LMEM;
-
mem->id = id;
+ mem->instance = instance;
intel_memory_region_set_name(mem, "local%u", mem->instance);
@@ -96,6 +102,12 @@ int intel_gt_assign_ggtt(struct intel_gt *gt)
return gt->ggtt ? 0 : -ENOMEM;
}
+static const char * const intel_steering_types[] = {
+ "L3BANK",
+ "MSLICE",
+ "LNCF",
+};
+
static const struct intel_mmio_range icl_l3bank_steering_table[] = {
{ 0x00B100, 0x00B3FF },
{},
@@ -439,14 +451,17 @@ void intel_gt_chipset_flush(struct intel_gt *gt)
{
wmb();
if (GRAPHICS_VER(gt->i915) < 6)
- intel_gtt_chipset_flush();
+ intel_gt_gmch_gen5_chipset_flush(gt);
}
void intel_gt_driver_register(struct intel_gt *gt)
{
+ intel_gsc_init(&gt->gsc, gt->i915);
+
intel_rps_driver_register(&gt->rps);
intel_gt_debugfs_register(gt);
+ intel_gt_sysfs_register(gt);
}
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
@@ -712,6 +727,11 @@ int intel_gt_init(struct intel_gt *gt)
if (err)
goto err_uc_init;
+ err = intel_gt_init_hwconfig(gt);
+ if (err)
+ drm_err(&gt->i915->drm, "Failed to retrieve hwconfig table: %pe\n",
+ ERR_PTR(err));
+
err = __engines_record_defaults(gt);
if (err)
goto err_gt;
@@ -766,6 +786,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
intel_wakeref_t wakeref;
intel_rps_driver_unregister(&gt->rps);
+ intel_gsc_fini(&gt->gsc);
intel_pxp_fini(&gt->pxp);
@@ -793,18 +814,24 @@ void intel_gt_driver_release(struct intel_gt *gt)
intel_gt_pm_fini(gt);
intel_gt_fini_scratch(gt);
intel_gt_fini_buffer_pool(gt);
+ intel_gt_fini_hwconfig(gt);
}
-void intel_gt_driver_late_release(struct intel_gt *gt)
+void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
{
+ struct intel_gt *gt;
+ unsigned int id;
+
/* We need to wait for inflight RCU frees to release their grip */
rcu_barrier();
- intel_uc_driver_late_release(&gt->uc);
- intel_gt_fini_requests(gt);
- intel_gt_fini_reset(gt);
- intel_gt_fini_timelines(gt);
- intel_engines_free(gt);
+ for_each_gt(gt, i915, id) {
+ intel_uc_driver_late_release(&gt->uc);
+ intel_gt_fini_requests(gt);
+ intel_gt_fini_reset(gt);
+ intel_gt_fini_timelines(gt);
+ intel_engines_free(gt);
+ }
}
/**
@@ -913,6 +940,35 @@ u32 intel_gt_read_register_fw(struct intel_gt *gt, i915_reg_t reg)
return intel_uncore_read_fw(gt->uncore, reg);
}
+/**
+ * intel_gt_get_valid_steering_for_reg - get a valid steering for a register
+ * @gt: GT structure
+ * @reg: register for which the steering is required
+ * @sliceid: return variable for slice steering
+ * @subsliceid: return variable for subslice steering
+ *
+ * This function returns a slice/subslice pair that is guaranteed to work for
+ * read steering of the given register. Note that a value will be returned even
+ * if the register is not replicated and therefore does not actually require
+ * steering.
+ */
+void intel_gt_get_valid_steering_for_reg(struct intel_gt *gt, i915_reg_t reg,
+ u8 *sliceid, u8 *subsliceid)
+{
+ int type;
+
+ for (type = 0; type < NUM_STEERING_TYPES; type++) {
+ if (intel_gt_reg_needs_read_steering(gt, reg, type)) {
+ intel_gt_get_valid_steering(gt, type, sliceid,
+ subsliceid);
+ return;
+ }
+ }
+
+ *sliceid = gt->default_steering.groupid;
+ *subsliceid = gt->default_steering.instanceid;
+}
+
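A hedged usage sketch for the helper above; "reg" is a stand-in for any
(possibly replicated) register, and intel_uncore_read_with_mcr_steering() is
assumed to be the existing steered-read primitive:

	u8 slice, subslice;
	u32 val;

	intel_gt_get_valid_steering_for_reg(gt, reg, &slice, &subslice);
	val = intel_uncore_read_with_mcr_steering(gt->uncore, reg,
						  slice, subslice);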
u32 intel_gt_read_register(struct intel_gt *gt, i915_reg_t reg)
{
int type;
@@ -932,6 +988,145 @@ u32 intel_gt_read_register(struct intel_gt *gt, i915_reg_t reg)
return intel_uncore_read(gt->uncore, reg);
}
+static void report_steering_type(struct drm_printer *p,
+ struct intel_gt *gt,
+ enum intel_steering_type type,
+ bool dump_table)
+{
+ const struct intel_mmio_range *entry;
+ u8 slice, subslice;
+
+ BUILD_BUG_ON(ARRAY_SIZE(intel_steering_types) != NUM_STEERING_TYPES);
+
+ if (!gt->steering_table[type]) {
+ drm_printf(p, "%s steering: uses default steering\n",
+ intel_steering_types[type]);
+ return;
+ }
+
+ intel_gt_get_valid_steering(gt, type, &slice, &subslice);
+ drm_printf(p, "%s steering: sliceid=0x%x, subsliceid=0x%x\n",
+ intel_steering_types[type], slice, subslice);
+
+ if (!dump_table)
+ return;
+
+ for (entry = gt->steering_table[type]; entry->end; entry++)
+ drm_printf(p, "\t0x%06x - 0x%06x\n", entry->start, entry->end);
+}
+
+void intel_gt_report_steering(struct drm_printer *p, struct intel_gt *gt,
+ bool dump_table)
+{
+ drm_printf(p, "Default steering: sliceid=0x%x, subsliceid=0x%x\n",
+ gt->default_steering.groupid,
+ gt->default_steering.instanceid);
+
+ if (HAS_MSLICES(gt->i915)) {
+ report_steering_type(p, gt, MSLICE, dump_table);
+ report_steering_type(p, gt, LNCF, dump_table);
+ }
+}
+
+static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
+{
+ int ret;
+
+ if (!gt_is_root(gt)) {
+ struct intel_uncore_mmio_debug *mmio_debug;
+ struct intel_uncore *uncore;
+
+ uncore = kzalloc(sizeof(*uncore), GFP_KERNEL);
+ if (!uncore)
+ return -ENOMEM;
+
+ mmio_debug = kzalloc(sizeof(*mmio_debug), GFP_KERNEL);
+ if (!mmio_debug) {
+ kfree(uncore);
+ return -ENOMEM;
+ }
+
+ gt->uncore = uncore;
+ gt->uncore->debug = mmio_debug;
+
+ __intel_gt_init_early(gt);
+ }
+
+ intel_uncore_init_early(gt->uncore, gt);
+
+ ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
+ if (ret)
+ return ret;
+
+ gt->phys_addr = phys_addr;
+
+ return 0;
+}
+
+static void
+intel_gt_tile_cleanup(struct intel_gt *gt)
+{
+ intel_uncore_cleanup_mmio(gt->uncore);
+
+ if (!gt_is_root(gt)) {
+ kfree(gt->uncore->debug);
+ kfree(gt->uncore);
+ kfree(gt);
+ }
+}
+
+int intel_gt_probe_all(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct intel_gt *gt = &i915->gt0;
+ phys_addr_t phys_addr;
+ unsigned int mmio_bar;
+ int ret;
+
+ mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0;
+ phys_addr = pci_resource_start(pdev, mmio_bar);
+
+ /*
+ * We always have at least one primary GT on any device
+	 * and it has already been initialized early during probe
+ * in i915_driver_probe()
+ */
+ ret = intel_gt_tile_setup(gt, phys_addr);
+ if (ret)
+ return ret;
+
+ i915->gt[0] = gt;
+
+ /* TODO: add more tiles */
+ return 0;
+}
+
+int intel_gt_tiles_init(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt;
+ unsigned int id;
+ int ret;
+
+ for_each_gt(gt, i915, id) {
+ ret = intel_gt_probe_lmem(gt);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void intel_gt_release_all(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt;
+ unsigned int id;
+
+ for_each_gt(gt, i915, id) {
+ intel_gt_tile_cleanup(gt);
+ i915->gt[id] = NULL;
+ }
+}
+
void intel_gt_info_print(const struct intel_gt_info *info,
struct drm_printer *p)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 0f571c8ee22b..44c6cb63ccbc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -13,12 +13,24 @@
struct drm_i915_private;
struct drm_printer;
+struct insert_entries {
+ struct i915_address_space *vm;
+ struct i915_vma_resource *vma_res;
+ enum i915_cache_level level;
+ u32 flags;
+};
+
#define GT_TRACE(gt, fmt, ...) do { \
const struct intel_gt *gt__ __maybe_unused = (gt); \
GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
##__VA_ARGS__); \
} while (0)
+static inline bool gt_is_root(struct intel_gt *gt)
+{
+ return !gt->info.id;
+}
+
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
return container_of(uc, struct intel_gt, uc);
@@ -34,10 +46,13 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
return container_of(huc, struct intel_gt, uc.huc);
}
-void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
-void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
+static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
+{
+ return container_of(gsc, struct intel_gt, gsc);
+}
+
+void intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
-int intel_gt_probe_lmem(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
int intel_gt_init(struct intel_gt *gt);
@@ -47,7 +62,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt);
void intel_gt_driver_remove(struct intel_gt *gt);
void intel_gt_driver_release(struct intel_gt *gt);
-void intel_gt_driver_late_release(struct intel_gt *gt);
+void intel_gt_driver_late_release_all(struct drm_i915_private *i915);
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
@@ -84,9 +99,25 @@ static inline bool intel_gt_needs_read_steering(struct intel_gt *gt,
return gt->steering_table[type];
}
+void intel_gt_get_valid_steering_for_reg(struct intel_gt *gt, i915_reg_t reg,
+ u8 *sliceid, u8 *subsliceid);
+
u32 intel_gt_read_register_fw(struct intel_gt *gt, i915_reg_t reg);
u32 intel_gt_read_register(struct intel_gt *gt, i915_reg_t reg);
+void intel_gt_report_steering(struct drm_printer *p, struct intel_gt *gt,
+ bool dump_table);
+
+int intel_gt_probe_all(struct drm_i915_private *i915);
+int intel_gt_tiles_init(struct drm_i915_private *i915);
+void intel_gt_release_all(struct drm_i915_private *i915);
+
+#define for_each_gt(gt__, i915__, id__) \
+ for ((id__) = 0; \
+ (id__) < I915_MAX_GT; \
+ (id__)++) \
+ for_each_if(((gt__) = (i915__)->gt[(id__)]))
+
void intel_gt_info_print(const struct intel_gt_info *info,
struct drm_printer *p);
@@ -94,4 +125,6 @@ void intel_gt_watchdog_work(struct work_struct *work);
void intel_gt_invalidate_tlbs(struct intel_gt *gt);
+struct resource intel_pci_resource(struct pci_dev *pdev, int bar);
+
#endif /* __INTEL_GT_H__ */
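
A minimal usage sketch for the new for_each_gt() iterator; the for_each_if()
guard skips unpopulated i915->gt[] slots, so only probed tiles are visited:

	struct intel_gt *gt;
	unsigned int id;

	for_each_gt(gt, i915, id)
		drm_info(&gt->i915->drm, "found gt%u\n", id);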
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 0db822c3b7e5..d5d1b04dbcad 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -161,6 +161,10 @@ void intel_gt_init_clock_frequency(struct intel_gt *gt)
if (gt->clock_frequency)
gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
+ /* Icelake appears to use another fixed frequency for CTX_TIMESTAMP */
+ if (GRAPHICS_VER(gt->i915) == 11)
+ gt->clock_period_ns = NSEC_PER_SEC / 13750000;
+
GT_TRACE(gt,
"Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
gt->clock_frequency / 1000,
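
For reference, the fixed Icelake period above works out, with integer
division, to NSEC_PER_SEC / 13750000 = 1,000,000,000 / 13,750,000 = 72 ns per
tick, i.e. a 13.75 MHz CTX_TIMESTAMP clock.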
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
index f103664b71d4..d886fdc2c694 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
@@ -6,6 +6,7 @@
#include <linux/debugfs.h>
#include "i915_drv.h"
+#include "intel_gt.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_engines_debugfs.h"
#include "intel_gt_pm_debugfs.h"
@@ -29,7 +30,7 @@ int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val)
}
}
-int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val)
+void intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val)
{
/* Flush any previous reset before applying for a new one */
wait_event(gt->reset.queue,
@@ -37,7 +38,6 @@ int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val)
intel_gt_handle_error(gt, val, I915_ERROR_CAPTURE,
"Manually reset engine mask to %llx", val);
- return 0;
}
/*
@@ -51,16 +51,30 @@ static int __intel_gt_debugfs_reset_show(void *data, u64 *val)
static int __intel_gt_debugfs_reset_store(void *data, u64 val)
{
- return intel_gt_debugfs_reset_store(data, val);
+ intel_gt_debugfs_reset_store(data, val);
+
+ return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(reset_fops, __intel_gt_debugfs_reset_show,
__intel_gt_debugfs_reset_store, "%llu\n");
+static int steering_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct intel_gt *gt = m->private;
+
+ intel_gt_report_steering(&p, gt, true);
+
+ return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(steering);
+
static void gt_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
static const struct intel_gt_debugfs_file files[] = {
{ "reset", &reset_fops, NULL },
+ { "steering", &steering_fops },
};
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
index 17e79b735cfe..e4110eebf093 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
@@ -48,6 +48,6 @@ void intel_gt_debugfs_register_files(struct dentry *root,
/* functions that need to be accessed by the upper level non-gt interfaces */
int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val);
-int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val);
+void intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val);
#endif /* INTEL_GT_DEBUGFS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_gmch.c b/drivers/gpu/drm/i915/gt/intel_gt_gmch.c
new file mode 100644
index 000000000000..18e488672d1b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_gmch.c
@@ -0,0 +1,654 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <drm/intel-gtt.h>
+#include <drm/i915_drm.h>
+
+#include <linux/agp_backend.h>
+#include <linux/stop_machine.h>
+
+#include "i915_drv.h"
+#include "intel_gt_gmch.h"
+#include "intel_gt_regs.h"
+#include "intel_gt.h"
+#include "i915_utils.h"
+
+#include "gen8_ppgtt.h"
+
+struct insert_page {
+ struct i915_address_space *vm;
+ dma_addr_t addr;
+ u64 offset;
+ enum i915_cache_level level;
+};
+
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+ writeq(pte, addr);
+}
+
+static void nop_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
+static u64 snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ MISSING_CASE(level);
+ }
+
+ return pte;
+}
+
+static u64 ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ pte |= GEN7_PTE_CACHE_L3_LLC;
+ break;
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ MISSING_CASE(level);
+ }
+
+ return pte;
+}
+
+static u64 byt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ if (!(flags & PTE_READ_ONLY))
+ pte |= BYT_PTE_WRITEABLE;
+
+ if (level != I915_CACHE_NONE)
+ pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
+
+ return pte;
+}
+
+static u64 hsw_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ if (level != I915_CACHE_NONE)
+ pte |= HSW_WB_LLC_AGE3;
+
+ return pte;
+}
+
+static u64 iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ break;
+ case I915_CACHE_WT:
+ pte |= HSW_WT_ELLC_LLC_AGE3;
+ break;
+ default:
+ pte |= HSW_WB_ELLC_LLC_AGE3;
+ break;
+ }
+
+ return pte;
+}
+
+static void gen5_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
+}
+
+static void gen6_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *pte =
+ (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ iowrite32(vm->pte_encode(addr, level, flags), pte);
+
+ ggtt->invalidate(ggtt);
+}
+
+static void gen8_ggtt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *pte =
+ (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
+
+ gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));
+
+ ggtt->invalidate(ggtt);
+}
+
+static void gen5_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+ AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+ intel_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
+ flags);
+}
+
+/*
+ * Binds an object into the global gtt with the specified cache level.
+ * The object will be accessible to the GPU via commands whose operands
+ * reference offsets within the global GTT as well as accessible by the GPU
+ * through the GMADR mapped BAR (i915->mm.gtt->gtt).
+ */
+static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen6_pte_t __iomem *gte;
+ gen6_pte_t __iomem *end;
+ struct sgt_iter iter;
+ dma_addr_t addr;
+
+ gte = (gen6_pte_t __iomem *)ggtt->gsm;
+ gte += vma_res->start / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+
+ for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
+ iowrite32(vm->pte_encode(addr, level, flags), gte++);
+ GEM_BUG_ON(gte > end);
+
+ /* Fill the allocated but "unused" space beyond the end of the buffer */
+ while (gte < end)
+ iowrite32(vm->scratch[0]->encode, gte++);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
+ */
+ ggtt->invalidate(ggtt);
+}
+
+static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *gte;
+ gen8_pte_t __iomem *end;
+ struct sgt_iter iter;
+ dma_addr_t addr;
+
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read only page.
+ */
+
+ gte = (gen8_pte_t __iomem *)ggtt->gsm;
+ gte += vma_res->start / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+
+ for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
+ gen8_set_pte(gte++, pte_encode | addr);
+ GEM_BUG_ON(gte > end);
+
+ /* Fill the allocated but "unused" space beyond the end of the buffer */
+ while (gte < end)
+ gen8_set_pte(gte++, vm->scratch[0]->encode);
+
+ /*
+ * We want to flush the TLBs only after we're certain all the PTE
+ * updates have finished.
+ */
+ ggtt->invalidate(ggtt);
+}
+
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+ /*
+ * Make sure the internal GAM fifo has been cleared of all GTT
+ * writes before exiting stop_machine(). This guarantees that
+ * any aperture accesses waiting to start in another process
+ * cannot back up behind the GTT writes causing a hang.
+ * The register can be any arbitrary GAM register.
+ */
+ intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
+}
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+ struct insert_page *arg = _arg;
+
+ gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 unused)
+{
+ struct insert_page arg = { vm, addr, offset, level };
+
+ stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+ struct insert_entries *arg = _arg;
+
+ gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
+ bxt_vtd_ggtt_wa(arg->vm);
+
+ return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct insert_entries arg = { vm, vma_res, level, flags };
+
+ stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
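For reference, stop_machine() runs the callback on a single CPU with all
other CPUs parked and interrupts disabled, so no aperture access can
interleave with these PTE writes; the posting read in bxt_vtd_ggtt_wa() then
drains the GAM write FIFO before the system resumes. This is the
serialization that intel_gt_gmch_gen8_probe() below opts into when VT-d is
active.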
+void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt)
+{
+ intel_gtt_chipset_flush();
+}
+
+static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
+{
+ intel_gtt_chipset_flush();
+}
+
+static void gen5_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
+}
+
+static void gen6_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ gen6_pte_t scratch_pte, __iomem *gtt_base =
+ (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ scratch_pte = vm->scratch[0]->encode;
+ for (i = 0; i < num_entries; i++)
+ iowrite32(scratch_pte, &gtt_base[i]);
+}
+
+static void gen8_ggtt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
+ gen8_pte_t __iomem *gtt_base =
+ (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+ int i;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ for (i = 0; i < num_entries; i++)
+ gen8_set_pte(&gtt_base[i], scratch_pte);
+}
+
+static void gen5_gmch_remove(struct i915_address_space *vm)
+{
+ intel_gmch_remove();
+}
+
+static void gen6_gmch_remove(struct i915_address_space *vm)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ iounmap(ggtt->gsm);
+ free_scratch(vm);
+}
+
+/*
+ * Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static bool needs_idle_maps(struct drm_i915_private *i915)
+{
+ /*
+ * Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ if (!i915_vtd_active(i915))
+ return false;
+
+ if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
+ return true;
+
+ if (GRAPHICS_VER(i915) == 12)
+ return true; /* XXX DMAR fault reason 7 */
+
+ return false;
+}
+
+static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
+{
+ /*
+ * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
+ * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
+ */
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
+ return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
+}
+
+static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+ snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+ snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+ return snb_gmch_ctl << 20;
+}
+
+static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
+{
+ bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
+ bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
+ if (bdw_gmch_ctl)
+ bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+
+#ifdef CONFIG_X86_32
+ /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
+ if (bdw_gmch_ctl > 4)
+ bdw_gmch_ctl = 4;
+#endif
+
+ return bdw_gmch_ctl << 20;
+}
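
A worked example of the GGMS decode above: on gen6 the field value is the
GTT size in MiB directly (value 2 -> 2 MiB of PTEs), while on gen8+ it is a
power of two (value 3 -> 1 << 3 = 8 MiB). With 8-byte gen8 PTEs and 4 KiB
pages, the CONFIG_X86_32 clamp to 4 MiB of PTE space caps the GGTT at
4 MiB / 8 B * 4 KiB = 2 GiB, matching the in-line comment.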
+
+static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
+{
+ return gen6_gttmmadr_size(i915) / 2;
+}
+
+static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ phys_addr_t phys_addr;
+ u32 pte_flags;
+ int ret;
+
+ GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
+ phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);
+
+ /*
+ * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
+ * will be dropped. For WC mappings in general we have 64 byte burst
+ * writes when the WC buffer is flushed, so we can't use it, but have to
+ * resort to an uncached mapping. The WC issue is easily caught by the
+ * readback check when writing GTT PTE entries.
+ */
+ if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
+ ggtt->gsm = ioremap(phys_addr, size);
+ else
+ ggtt->gsm = ioremap_wc(phys_addr, size);
+ if (!ggtt->gsm) {
+ drm_err(&i915->drm, "Failed to map the ggtt page table\n");
+ return -ENOMEM;
+ }
+
+ kref_init(&ggtt->vm.resv_ref);
+ ret = setup_scratch_page(&ggtt->vm);
+ if (ret) {
+ drm_err(&i915->drm, "Scratch setup failed\n");
+ /* iounmap will also get called at remove, but meh */
+ iounmap(ggtt->gsm);
+ return ret;
+ }
+
+ pte_flags = 0;
+ if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
+ pte_flags |= PTE_LM;
+
+ ggtt->vm.scratch[0]->encode =
+ ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
+ I915_CACHE_NONE, pte_flags);
+
+ return 0;
+}
+
+int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ phys_addr_t gmadr_base;
+ int ret;
+
+ ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
+ if (!ret) {
+ drm_err(&i915->drm, "failed to set up gmch\n");
+ return -EIO;
+ }
+
+ intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
+
+ ggtt->gmadr =
+ (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
+
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
+
+ if (needs_idle_maps(i915)) {
+ drm_notice(&i915->drm,
+ "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
+ ggtt->do_idle_maps = true;
+ }
+
+ ggtt->vm.insert_page = gen5_ggtt_insert_page;
+ ggtt->vm.insert_entries = gen5_ggtt_insert_entries;
+ ggtt->vm.clear_range = gen5_ggtt_clear_range;
+ ggtt->vm.cleanup = gen5_gmch_remove;
+
+ ggtt->invalidate = gmch_ggtt_invalidate;
+
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
+
+ if (unlikely(ggtt->do_idle_maps))
+ drm_notice(&i915->drm,
+ "Applying Ironlake quirks for intel_iommu\n");
+
+ return 0;
+}
+
+int intel_gt_gmch_gen6_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ unsigned int size;
+ u16 snb_gmch_ctl;
+
+ ggtt->gmadr = intel_pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+
+ /*
+ * 64/512MB is the current min/max we actually know of, but this is
+ * just a coarse sanity check.
+ */
+ if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
+ drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
+ &ggtt->mappable_end);
+ return -ENXIO;
+ }
+
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+
+ size = gen6_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
+
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
+
+ ggtt->vm.clear_range = nop_clear_range;
+ if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
+ ggtt->vm.clear_range = gen6_ggtt_clear_range;
+ ggtt->vm.insert_page = gen6_ggtt_insert_page;
+ ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+
+ ggtt->invalidate = gen6_ggtt_invalidate;
+
+ if (HAS_EDRAM(i915))
+ ggtt->vm.pte_encode = iris_pte_encode;
+ else if (IS_HASWELL(i915))
+ ggtt->vm.pte_encode = hsw_pte_encode;
+ else if (IS_VALLEYVIEW(i915))
+ ggtt->vm.pte_encode = byt_pte_encode;
+ else if (GRAPHICS_VER(i915) >= 7)
+ ggtt->vm.pte_encode = ivb_pte_encode;
+ else
+ ggtt->vm.pte_encode = snb_pte_encode;
+
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
+
+ return ggtt_probe_common(ggtt, size);
+}
+
+static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
+{
+ gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
+ gmch_ctrl &= SNB_GMCH_GGMS_MASK;
+
+ if (gmch_ctrl)
+ return 1 << (20 + gmch_ctrl);
+
+ return 0;
+}
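
The Cherryview variant above decodes the same field: a nonzero value n yields
1 << (20 + n) bytes, i.e. 2^n MiB of PTE space, while 0 means no GGTT is
present.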
+
+int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ unsigned int size;
+ u16 snb_gmch_ctl;
+
+ /* TODO: We're not aware of mappable constraints on gen8 yet */
+ if (!HAS_LMEM(i915)) {
+ ggtt->gmadr = intel_pci_resource(pdev, 2);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
+ }
+
+ pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ if (IS_CHERRYVIEW(i915))
+ size = chv_get_total_gtt_size(snb_gmch_ctl);
+ else
+ size = gen8_get_total_gtt_size(snb_gmch_ctl);
+
+ ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
+ ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
+
+ ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+ ggtt->vm.cleanup = gen6_gmch_remove;
+ ggtt->vm.insert_page = gen8_ggtt_insert_page;
+ ggtt->vm.clear_range = nop_clear_range;
+ if (intel_scanout_needs_vtd_wa(i915))
+ ggtt->vm.clear_range = gen8_ggtt_clear_range;
+
+ ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
+
+ /*
+ * Serialize GTT updates with aperture access on BXT if VT-d is on,
+ * and always on CHV.
+ */
+ if (intel_vm_no_concurrent_access_wa(i915)) {
+ ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+ ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
+ ggtt->vm.bind_async_flags =
+ I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+ }
+
+ ggtt->invalidate = gen8_ggtt_invalidate;
+
+ ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
+ ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
+
+ ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
+
+ setup_private_pat(ggtt->vm.gt->uncore);
+
+ return ggtt_probe_common(ggtt, size);
+}
+
+int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915)
+{
+ if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt())
+ return -EIO;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_gmch.h b/drivers/gpu/drm/i915/gt/intel_gt_gmch.h
new file mode 100644
index 000000000000..75ed55c1f30a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_gmch.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_GMCH_H__
+#define __INTEL_GT_GMCH_H__
+
+#include "intel_gtt.h"
+
+/* For x86 platforms */
+#if IS_ENABLED(CONFIG_X86)
+void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt);
+int intel_gt_gmch_gen6_probe(struct i915_ggtt *ggtt);
+int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt);
+int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt);
+int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915);
+
+/* Stubs for non-x86 platforms */
+#else
+static inline void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt)
+{
+}
+static inline int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt)
+{
+ /* No HW should be probed for this case yet, return fail */
+ return -ENODEV;
+}
+static inline int intel_gt_gmch_gen6_probe(struct i915_ggtt *ggtt)
+{
+ /* No HW should be probed for this case yet, return fail */
+ return -ENODEV;
+}
+static inline int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt)
+{
+ /* No HW should be probed for this case yet, return fail */
+ return -ENODEV;
+}
+static inline int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915)
+{
+ /* No HW should be enabled for this case yet, return fail */
+ return -ENODEV;
+}
+#endif
+
+#endif /* __INTEL_GT_GMCH_H__ */
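
For reference, ggtt_probe_hw() earlier in this patch dispatches to these
entry points by GRAPHICS_VER: <= 5 selects intel_gt_gmch_gen5_probe(), 6-7
intel_gt_gmch_gen6_probe(), and >= 8 intel_gt_gmch_gen8_probe(); on non-x86
configs every stub returns -ENODEV so GGTT probing fails cleanly instead of
touching hardware that cannot exist there.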
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index e443ac4c8059..88b4becfcb17 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -68,6 +68,9 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
if (instance == OTHER_KCR_INSTANCE)
return intel_pxp_irq_handler(&gt->pxp, iir);
+ if (instance == OTHER_GSC_INSTANCE)
+ return intel_gsc_irq_handler(gt, iir);
+
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
instance, iir);
}
@@ -184,6 +187,8 @@ void gen11_gt_irq_reset(struct intel_gt *gt)
intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);
if (CCS_MASK(gt))
intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, 0);
+ if (HAS_HECI_GSC(gt->i915))
+ intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE, 0);
/* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
@@ -201,6 +206,8 @@ void gen11_gt_irq_reset(struct intel_gt *gt)
intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~0);
if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3))
intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~0);
+ if (HAS_HECI_GSC(gt->i915))
+ intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
@@ -215,6 +222,7 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
u32 irqs = GT_RENDER_USER_INTERRUPT;
+ const u32 gsc_mask = GSC_IRQ_INTF(0) | GSC_IRQ_INTF(1);
u32 dmask;
u32 smask;
@@ -233,6 +241,9 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
if (CCS_MASK(gt))
intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, smask);
+ if (HAS_HECI_GSC(gt->i915))
+ intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE,
+ gsc_mask);
/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
@@ -250,6 +261,8 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~dmask);
if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3))
intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~dmask);
+ if (HAS_HECI_GSC(gt->i915))
+ intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~gsc_mask);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index e4ecc17889d3..f553e2173bda 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -129,7 +129,14 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_gt_pm_init_early(struct intel_gt *gt)
{
- intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
+ /*
+ * We access the runtime_pm structure via gt->i915 here rather than
+ * gt->uncore as we do elsewhere in the file because gt->uncore is not
+ * yet initialized for all tiles at this point in the driver startup.
+ * runtime_pm is per-device rather than per-tile, so this is still the
+ * correct structure.
+ */
+ intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);
seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
@@ -175,15 +182,16 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
if (intel_gt_is_wedged(gt))
intel_gt_unset_wedged(gt);
- for_each_engine(engine, gt, id)
+ /* For GuC mode, ensure submission is disabled before stopping ring */
+ intel_uc_reset_prepare(&gt->uc);
+
+ for_each_engine(engine, gt, id) {
if (engine->reset.prepare)
engine->reset.prepare(engine);
- intel_uc_reset_prepare(&gt->uc);
-
- for_each_engine(engine, gt, id)
if (engine->sanitize)
engine->sanitize(engine);
+ }
if (reset_engines(gt) || force) {
for_each_engine(engine, gt, id)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 437e96bb3b93..0c6b9eb724ae 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -24,38 +24,38 @@
#include "intel_uncore.h"
#include "vlv_sideband.h"
-int intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
+void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
{
atomic_inc(&gt->user_wakeref);
intel_gt_pm_get(gt);
if (GRAPHICS_VER(gt->i915) >= 6)
intel_uncore_forcewake_user_get(gt->uncore);
-
- return 0;
}
-int intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
+void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
{
if (GRAPHICS_VER(gt->i915) >= 6)
intel_uncore_forcewake_user_put(gt->uncore);
intel_gt_pm_put(gt);
atomic_dec(&gt->user_wakeref);
-
- return 0;
}
static int forcewake_user_open(struct inode *inode, struct file *file)
{
struct intel_gt *gt = inode->i_private;
- return intel_gt_pm_debugfs_forcewake_user_open(gt);
+ intel_gt_pm_debugfs_forcewake_user_open(gt);
+
+ return 0;
}
static int forcewake_user_release(struct inode *inode, struct file *file)
{
struct intel_gt *gt = inode->i_private;
- return intel_gt_pm_debugfs_forcewake_user_release(gt);
+ intel_gt_pm_debugfs_forcewake_user_release(gt);
+
+ return 0;
}
static const struct file_operations forcewake_user_fops = {
@@ -342,17 +342,16 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
} else if (GRAPHICS_VER(i915) >= 6) {
u32 rp_state_limits;
u32 gt_perf_status;
- u32 rp_state_cap;
+ struct intel_rps_freq_caps caps;
u32 rpmodectl, rpinclimit, rpdeclimit;
u32 rpstat, cagf, reqf;
u32 rpcurupei, rpcurup, rpprevup;
u32 rpcurdownei, rpcurdown, rpprevdown;
u32 rpupei, rpupt, rpdownei, rpdownt;
u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
- int max_freq;
rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS);
- rp_state_cap = intel_rps_read_state_cap(rps);
+ gen6_rps_get_freq_caps(rps, &caps);
if (IS_GEN9_LP(i915))
gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS);
else
@@ -474,25 +473,12 @@ void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
drm_printf(p, "RP DOWN THRESHOLD: %d (%lldns)\n",
rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));
- max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
- rp_state_cap >> 16) & 0xff;
- max_freq *= (IS_GEN9_BC(i915) ||
- GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1);
drm_printf(p, "Lowest (RPN) frequency: %dMHz\n",
- intel_gpu_freq(rps, max_freq));
-
- max_freq = (rp_state_cap & 0xff00) >> 8;
- max_freq *= (IS_GEN9_BC(i915) ||
- GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1);
+ intel_gpu_freq(rps, caps.min_freq));
drm_printf(p, "Nominal (RP1) frequency: %dMHz\n",
- intel_gpu_freq(rps, max_freq));
-
- max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 :
- rp_state_cap >> 0) & 0xff;
- max_freq *= (IS_GEN9_BC(i915) ||
- GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1);
+ intel_gpu_freq(rps, caps.rp1_freq));
drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n",
- intel_gpu_freq(rps, max_freq));
+ intel_gpu_freq(rps, caps.rp0_freq));
drm_printf(p, "Max overclocked frequency: %dMHz\n",
intel_gpu_freq(rps, rps->max_freq));
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h
index a8457887ec65..0ace8c2da0ac 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h
@@ -14,7 +14,7 @@ void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root);
void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *m);
/* functions that need to be accessed by the upper level non-gt interfaces */
-int intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt);
-int intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt);
+void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt);
+void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt);
#endif /* INTEL_GT_PM_DEBUGFS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 201b507c9dde..a39718a40cc3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -46,6 +46,7 @@
#define GEN8_MCR_SLICE_MASK GEN8_MCR_SLICE(3)
#define GEN8_MCR_SUBSLICE(subslice) (((subslice) & 3) << 24)
#define GEN8_MCR_SUBSLICE_MASK GEN8_MCR_SUBSLICE(3)
+#define GEN11_MCR_MULTICAST REG_BIT(31)
#define GEN11_MCR_SLICE(slice) (((slice) & 0xf) << 27)
#define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf)
#define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24)
@@ -840,6 +841,24 @@
#define CTC_SHIFT_PARAMETER_SHIFT 1
#define CTC_SHIFT_PARAMETER_MASK (0x3 << CTC_SHIFT_PARAMETER_SHIFT)
+/* GPM MSG_IDLE */
+#define MSG_IDLE_CS _MMIO(0x8000)
+#define MSG_IDLE_VCS0 _MMIO(0x8004)
+#define MSG_IDLE_VCS1 _MMIO(0x8008)
+#define MSG_IDLE_BCS _MMIO(0x800C)
+#define MSG_IDLE_VECS0 _MMIO(0x8010)
+#define MSG_IDLE_VCS2 _MMIO(0x80C0)
+#define MSG_IDLE_VCS3 _MMIO(0x80C4)
+#define MSG_IDLE_VCS4 _MMIO(0x80C8)
+#define MSG_IDLE_VCS5 _MMIO(0x80CC)
+#define MSG_IDLE_VCS6 _MMIO(0x80D0)
+#define MSG_IDLE_VCS7 _MMIO(0x80D4)
+#define MSG_IDLE_VECS1 _MMIO(0x80D8)
+#define MSG_IDLE_VECS2 _MMIO(0x80DC)
+#define MSG_IDLE_VECS3 _MMIO(0x80E0)
+#define MSG_IDLE_FW_MASK REG_GENMASK(13, 9)
+#define MSG_IDLE_FW_SHIFT 9
+
#define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270)
#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278)
@@ -1087,6 +1106,7 @@
#define EU_PERF_CNTL3 _MMIO(0xe758)
#define LSC_CHICKEN_BIT_0 _MMIO(0xe7c8)
+#define DISABLE_D8_D16_COASLESCE REG_BIT(30)
#define FORCE_1_SUB_MESSAGE_PER_FRAGMENT REG_BIT(15)
#define LSC_CHICKEN_BIT_0_UDW _MMIO(0xe7c8 + 4)
#define DIS_CHAIN_2XSIMD8 REG_BIT(55 - 32)
@@ -1482,6 +1502,7 @@
#define OTHER_GUC_INSTANCE 0
#define OTHER_GTPM_INSTANCE 1
#define OTHER_KCR_INSTANCE 4
+#define OTHER_GSC_INSTANCE 6
#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4))
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
new file mode 100644
index 000000000000..8ec8bc660c8c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <drm/drm_device.h>
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/printk.h>
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_sysfs.h"
+#include "intel_gt.h"
+#include "intel_gt_sysfs.h"
+#include "intel_gt_sysfs_pm.h"
+#include "intel_gt_types.h"
+#include "intel_rc6.h"
+
+bool is_object_gt(struct kobject *kobj)
+{
+ return !strncmp(kobj->name, "gt", 2);
+}
+
+static struct intel_gt *kobj_to_gt(struct kobject *kobj)
+{
+ return container_of(kobj, struct kobj_gt, base)->gt;
+}
+
+struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
+ const char *name)
+{
+ struct kobject *kobj = &dev->kobj;
+
+ /*
+ * We need to know where the interface was called from:
+ * the gt/ directory or its parent. The type of the
+ * private data depends on that position: if it was
+ * called from gt/, the private data is of the
+ * "struct intel_gt *" type, otherwise it is of the
+ * "struct drm_i915_private *" type.
+ */
+ if (!is_object_gt(kobj)) {
+ struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
+
+ return to_gt(i915);
+ }
+
+ return kobj_to_gt(kobj);
+}
+
+static struct kobject *gt_get_parent_obj(struct intel_gt *gt)
+{
+ return &gt->i915->drm.primary->kdev->kobj;
+}
+
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+
+ return sysfs_emit(buf, "%u\n", gt->info.id);
+}
+static DEVICE_ATTR_RO(id);
+
+static struct attribute *id_attrs[] = {
+ &dev_attr_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(id);
+
+static void kobj_gt_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static struct kobj_type kobj_gt_type = {
+ .release = kobj_gt_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = id_groups,
+};
+
+void intel_gt_sysfs_register(struct intel_gt *gt)
+{
+ struct kobj_gt *kg;
+
+ /*
+ * To preserve ABI compatibility, keep generating the
+ * files under the parent directory, where they were
+ * originally exposed.
+ *
+ * Generate them only for gt 0 to avoid duplicates.
+ */
+ if (gt_is_root(gt))
+ intel_gt_sysfs_pm_init(gt, gt_get_parent_obj(gt));
+
+ kg = kzalloc(sizeof(*kg), GFP_KERNEL);
+ if (!kg)
+ goto exit_fail;
+
+ kobject_init(&kg->base, &kobj_gt_type);
+ kg->gt = gt;
+
+ /* xfer ownership to sysfs tree */
+ if (kobject_add(&kg->base, gt->i915->sysfs_gt, "gt%d", gt->info.id))
+ goto exit_kobj_put;
+
+ intel_gt_sysfs_pm_init(gt, &kg->base);
+
+ return;
+
+exit_kobj_put:
+ kobject_put(&kg->base);
+
+exit_fail:
+ drm_warn(&gt->i915->drm,
+ "failed to initialize gt%d sysfs root\n", gt->info.id);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
new file mode 100644
index 000000000000..9471b26752cf
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __SYSFS_GT_H__
+#define __SYSFS_GT_H__
+
+#include <linux/ctype.h>
+#include <linux/kobject.h>
+
+#include "i915_gem.h" /* GEM_BUG_ON() */
+
+struct intel_gt;
+
+struct kobj_gt {
+ struct kobject base;
+ struct intel_gt *gt;
+};
+
+bool is_object_gt(struct kobject *kobj);
+
+struct drm_i915_private *kobj_to_i915(struct kobject *kobj);
+
+struct kobject *
+intel_gt_create_kobj(struct intel_gt *gt,
+ struct kobject *dir,
+ const char *name);
+
+void intel_gt_sysfs_register(struct intel_gt *gt);
+struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
+ const char *name);
+
+#endif /* __SYSFS_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
new file mode 100644
index 000000000000..26cbfa6477d1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <drm/drm_device.h>
+#include <linux/sysfs.h>
+#include <linux/printk.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_sysfs.h"
+#include "intel_gt.h"
+#include "intel_gt_regs.h"
+#include "intel_gt_sysfs.h"
+#include "intel_gt_sysfs_pm.h"
+#include "intel_rc6.h"
+#include "intel_rps.h"
+
+#ifdef CONFIG_PM
+enum intel_gt_sysfs_op {
+ INTEL_GT_SYSFS_MIN = 0,
+ INTEL_GT_SYSFS_MAX,
+};
+
+static int
+sysfs_gt_attribute_w_func(struct device *dev, struct device_attribute *attr,
+ int (func)(struct intel_gt *gt, u32 val), u32 val)
+{
+ struct intel_gt *gt;
+ int ret;
+
+ if (!is_object_gt(&dev->kobj)) {
+ int i;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
+
+ for_each_gt(gt, i915, i) {
+ ret = func(gt, val);
+ if (ret)
+ break;
+ }
+ } else {
+ gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ ret = func(gt, val);
+ }
+
+ return ret;
+}
+
+static u32
+sysfs_gt_attribute_r_func(struct device *dev, struct device_attribute *attr,
+ u32 (func)(struct intel_gt *gt),
+ enum intel_gt_sysfs_op op)
+{
+ struct intel_gt *gt;
+ u32 ret;
+
+ ret = (op == INTEL_GT_SYSFS_MAX) ? 0 : (u32) -1;
+
+ if (!is_object_gt(&dev->kobj)) {
+ int i;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(dev);
+
+ for_each_gt(gt, i915, i) {
+ u32 val = func(gt);
+
+ switch (op) {
+ case INTEL_GT_SYSFS_MIN:
+ if (val < ret)
+ ret = val;
+ break;
+
+ case INTEL_GT_SYSFS_MAX:
+ if (val > ret)
+ ret = val;
+ break;
+ }
+ }
+ } else {
+ gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ ret = func(gt);
+ }
+
+ return ret;
+}
+
+/* RC6 interfaces will show the minimum RC6 residency value */
+#define sysfs_gt_attribute_r_min_func(d, a, f) \
+ sysfs_gt_attribute_r_func(d, a, f, INTEL_GT_SYSFS_MIN)
+
+/* Frequency interfaces will show the maximum frequency value */
+#define sysfs_gt_attribute_r_max_func(d, a, f) \
+ sysfs_gt_attribute_r_func(d, a, f, INTEL_GT_SYSFS_MAX)
+
+static u32 get_residency(struct intel_gt *gt, i915_reg_t reg)
+{
+ intel_wakeref_t wakeref;
+ u64 res = 0;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ res = intel_rc6_residency_us(&gt->rc6, reg);
+
+ return DIV_ROUND_CLOSEST_ULL(res, 1000);
+}
+
+static ssize_t rc6_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ u8 mask = 0;
+
+ if (HAS_RC6(gt->i915))
+ mask |= BIT(0);
+ if (HAS_RC6p(gt->i915))
+ mask |= BIT(1);
+ if (HAS_RC6pp(gt->i915))
+ mask |= BIT(2);
+
+ return sysfs_emit(buff, "%x\n", mask);
+}
+
+static u32 __rc6_residency_ms_show(struct intel_gt *gt)
+{
+ return get_residency(gt, GEN6_GT_GFX_RC6);
+}
+
+static ssize_t rc6_residency_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ u32 rc6_residency = sysfs_gt_attribute_r_min_func(dev, attr,
+ __rc6_residency_ms_show);
+
+ return sysfs_emit(buff, "%u\n", rc6_residency);
+}
+
+static u32 __rc6p_residency_ms_show(struct intel_gt *gt)
+{
+ return get_residency(gt, GEN6_GT_GFX_RC6p);
+}
+
+static ssize_t rc6p_residency_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ u32 rc6p_residency = sysfs_gt_attribute_r_min_func(dev, attr,
+ __rc6p_residency_ms_show);
+
+ return sysfs_emit(buff, "%u\n", rc6p_residency);
+}
+
+static u32 __rc6pp_residency_ms_show(struct intel_gt *gt)
+{
+ return get_residency(gt, GEN6_GT_GFX_RC6pp);
+}
+
+static ssize_t rc6pp_residency_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ u32 rc6pp_residency = sysfs_gt_attribute_r_min_func(dev, attr,
+ __rc6pp_residency_ms_show);
+
+ return sysfs_emit(buff, "%u\n", rc6pp_residency);
+}
+
+static u32 __media_rc6_residency_ms_show(struct intel_gt *gt)
+{
+ return get_residency(gt, VLV_GT_MEDIA_RC6);
+}
+
+static ssize_t media_rc6_residency_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ u32 rc6_residency = sysfs_gt_attribute_r_min_func(dev, attr,
+ __media_rc6_residency_ms_show);
+
+ return sysfs_emit(buff, "%u\n", rc6_residency);
+}
+
+static DEVICE_ATTR_RO(rc6_enable);
+static DEVICE_ATTR_RO(rc6_residency_ms);
+static DEVICE_ATTR_RO(rc6p_residency_ms);
+static DEVICE_ATTR_RO(rc6pp_residency_ms);
+static DEVICE_ATTR_RO(media_rc6_residency_ms);
+
+static struct attribute *rc6_attrs[] = {
+ &dev_attr_rc6_enable.attr,
+ &dev_attr_rc6_residency_ms.attr,
+ NULL
+};
+
+static struct attribute *rc6p_attrs[] = {
+ &dev_attr_rc6p_residency_ms.attr,
+ &dev_attr_rc6pp_residency_ms.attr,
+ NULL
+};
+
+static struct attribute *media_rc6_attrs[] = {
+ &dev_attr_media_rc6_residency_ms.attr,
+ NULL
+};
+
+static const struct attribute_group rc6_attr_group[] = {
+ { .attrs = rc6_attrs, },
+ { .name = power_group_name, .attrs = rc6_attrs, },
+};
+
+static const struct attribute_group rc6p_attr_group[] = {
+ { .attrs = rc6p_attrs, },
+ { .name = power_group_name, .attrs = rc6p_attrs, },
+};
+
+static const struct attribute_group media_rc6_attr_group[] = {
+ { .attrs = media_rc6_attrs, },
+ { .name = power_group_name, .attrs = media_rc6_attrs, },
+};
+
+static int __intel_gt_sysfs_create_group(struct kobject *kobj,
+ const struct attribute_group *grp)
+{
+ return is_object_gt(kobj) ?
+ sysfs_create_group(kobj, &grp[0]) :
+ sysfs_merge_group(kobj, &grp[1]);
+}
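
The distinction matters because the parent device already exposes a "power"
group (power_group_name) that the legacy RC6 files are merged into, while
each gt kobject gets the group created fresh. A sketch of the resulting
layout (the paths are assumptions about the device node):

	/*
	 * gt object:  .../card0/gt/gt0/rc6_residency_ms   (sysfs_create_group)
	 * parent dev: .../card0/power/rc6_residency_ms    (sysfs_merge_group)
	 */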
+
+static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
+{
+ int ret;
+
+ if (!HAS_RC6(gt->i915))
+ return;
+
+ ret = __intel_gt_sysfs_create_group(kobj, rc6_attr_group);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u RC6 sysfs files (%pe)\n",
+ gt->info.id, ERR_PTR(ret));
+
+ /*
+ * We can't use an is_visible() callback here because the
+ * upper-level object inherits the attributes from the parent group.
+ */
+ if (HAS_RC6p(gt->i915)) {
+ ret = __intel_gt_sysfs_create_group(kobj, rc6p_attr_group);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u RC6p sysfs files (%pe)\n",
+ gt->info.id, ERR_PTR(ret));
+ }
+
+ if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
+ ret = __intel_gt_sysfs_create_group(kobj, media_rc6_attr_group);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create media %u RC6 sysfs files (%pe)\n",
+ gt->info.id, ERR_PTR(ret));
+ }
+}
+#else
+static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
+{
+}
+#endif /* CONFIG_PM */
+
+static u32 __act_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_read_actual_frequency(&gt->rps);
+}
+
+static ssize_t act_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ u32 actual_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+ __act_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", actual_freq);
+}
+
+static u32 __cur_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_requested_frequency(&gt->rps);
+}
+
+static ssize_t cur_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ u32 cur_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+ __cur_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", cur_freq);
+}
+
+static u32 __boost_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_boost_frequency(&gt->rps);
+}
+
+static ssize_t boost_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ u32 boost_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+ __boost_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", boost_freq);
+}
+
+static int __boost_freq_mhz_store(struct intel_gt *gt, u32 val)
+{
+ return intel_rps_set_boost_frequency(&gt->rps, val);
+}
+
+static ssize_t boost_freq_mhz_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ ssize_t ret;
+ u32 val;
+
+ ret = kstrtou32(buff, 0, &val);
+ if (ret)
+ return ret;
+
+ return sysfs_gt_attribute_w_func(dev, attr,
+ __boost_freq_mhz_store, val) ?: count;
+}
+
+static u32 __rp0_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_rp0_frequency(&gt->rps);
+}
+
+static ssize_t RP0_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ u32 rp0_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+ __rp0_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", rp0_freq);
+}
+
+static u32 __rp1_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_rp1_frequency(&gt->rps);
+}
+
+static ssize_t RP1_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ u32 rp1_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+ __rp1_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", rp1_freq);
+}
+
+static u32 __rpn_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_rpn_frequency(&gt->rps);
+}
+
+static ssize_t RPn_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ u32 rpn_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+ __rpn_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", rpn_freq);
+}
+
+static u32 __max_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_max_frequency(&gt->rps);
+}
+
+static ssize_t max_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ u32 max_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+ __max_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", max_freq);
+}
+
+static int __set_max_freq(struct intel_gt *gt, u32 val)
+{
+ return intel_rps_set_max_frequency(&gt->rps, val);
+}
+
+static ssize_t max_freq_mhz_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 val;
+
+ ret = kstrtou32(buff, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = sysfs_gt_attribute_w_func(dev, attr, __set_max_freq, val);
+
+ return ret ?: count;
+}
+
+static u32 __min_freq_mhz_show(struct intel_gt *gt)
+{
+ return intel_rps_get_min_frequency(&gt->rps);
+}
+
+static ssize_t min_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ u32 min_freq = sysfs_gt_attribute_r_min_func(dev, attr,
+ __min_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", min_freq);
+}
+
+static int __set_min_freq(struct intel_gt *gt, u32 val)
+{
+ return intel_rps_set_min_frequency(&gt->rps, val);
+}
+
+static ssize_t min_freq_mhz_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 val;
+
+ ret = kstrtou32(buff, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = sysfs_gt_attribute_w_func(dev, attr, __set_min_freq, val);
+
+ return ret ?: count;
+}
+
+static u32 __vlv_rpe_freq_mhz_show(struct intel_gt *gt)
+{
+ struct intel_rps *rps = &gt->rps;
+
+ return intel_gpu_freq(rps, rps->efficient_freq);
+}
+
+static ssize_t vlv_rpe_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ u32 rpe_freq = sysfs_gt_attribute_r_max_func(dev, attr,
+ __vlv_rpe_freq_mhz_show);
+
+ return sysfs_emit(buff, "%u\n", rpe_freq);
+}
+
+#define INTEL_GT_RPS_SYSFS_ATTR(_name, _mode, _show, _store) \
+ struct device_attribute dev_attr_gt_##_name = __ATTR(gt_##_name, _mode, _show, _store); \
+ struct device_attribute dev_attr_rps_##_name = __ATTR(rps_##_name, _mode, _show, _store)
+
+#define INTEL_GT_RPS_SYSFS_ATTR_RO(_name) \
+ INTEL_GT_RPS_SYSFS_ATTR(_name, 0444, _name##_show, NULL)
+#define INTEL_GT_RPS_SYSFS_ATTR_RW(_name) \
+ INTEL_GT_RPS_SYSFS_ATTR(_name, 0644, _name##_show, _name##_store)
+
+static INTEL_GT_RPS_SYSFS_ATTR_RO(act_freq_mhz);
+static INTEL_GT_RPS_SYSFS_ATTR_RO(cur_freq_mhz);
+static INTEL_GT_RPS_SYSFS_ATTR_RW(boost_freq_mhz);
+static INTEL_GT_RPS_SYSFS_ATTR_RO(RP0_freq_mhz);
+static INTEL_GT_RPS_SYSFS_ATTR_RO(RP1_freq_mhz);
+static INTEL_GT_RPS_SYSFS_ATTR_RO(RPn_freq_mhz);
+static INTEL_GT_RPS_SYSFS_ATTR_RW(max_freq_mhz);
+static INTEL_GT_RPS_SYSFS_ATTR_RW(min_freq_mhz);
+
+static DEVICE_ATTR_RO(vlv_rpe_freq_mhz);
+
+#define GEN6_ATTR(s) { \
+ &dev_attr_##s##_act_freq_mhz.attr, \
+ &dev_attr_##s##_cur_freq_mhz.attr, \
+ &dev_attr_##s##_boost_freq_mhz.attr, \
+ &dev_attr_##s##_max_freq_mhz.attr, \
+ &dev_attr_##s##_min_freq_mhz.attr, \
+ &dev_attr_##s##_RP0_freq_mhz.attr, \
+ &dev_attr_##s##_RP1_freq_mhz.attr, \
+ &dev_attr_##s##_RPn_freq_mhz.attr, \
+ NULL, \
+ }
+
+#define GEN6_RPS_ATTR GEN6_ATTR(rps)
+#define GEN6_GT_ATTR GEN6_ATTR(gt)
+
+static const struct attribute * const gen6_rps_attrs[] = GEN6_RPS_ATTR;
+static const struct attribute * const gen6_gt_attrs[] = GEN6_GT_ATTR;
+
+static ssize_t punit_req_freq_mhz_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ u32 preq = intel_rps_read_punit_req_frequency(&gt->rps);
+
+ return sysfs_emit(buff, "%u\n", preq);
+}
+
+struct intel_gt_bool_throttle_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ i915_reg_t reg32;
+ u32 mask;
+};
+
+static ssize_t throttle_reason_bool_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buff)
+{
+ struct intel_gt *gt = intel_gt_sysfs_get_drvdata(dev, attr->attr.name);
+ struct intel_gt_bool_throttle_attr *t_attr =
+ (struct intel_gt_bool_throttle_attr *) attr;
+ bool val = rps_read_mask_mmio(&gt->rps, t_attr->reg32, t_attr->mask);
+
+ return sysfs_emit(buff, "%u\n", val);
+}
+
+#define INTEL_GT_RPS_BOOL_ATTR_RO(sysfs_func__, mask__) \
+struct intel_gt_bool_throttle_attr attr_##sysfs_func__ = { \
+ .attr = { .name = __stringify(sysfs_func__), .mode = 0444 }, \
+ .show = throttle_reason_bool_show, \
+ .reg32 = GT0_PERF_LIMIT_REASONS, \
+ .mask = mask__, \
+}
+
+static DEVICE_ATTR_RO(punit_req_freq_mhz);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_status, GT0_PERF_LIMIT_REASONS_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl1, POWER_LIMIT_1_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl2, POWER_LIMIT_2_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_pl4, POWER_LIMIT_4_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_thermal, THERMAL_LIMIT_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_prochot, PROCHOT_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_ratl, RATL_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_thermalert, VR_THERMALERT_MASK);
+static INTEL_GT_RPS_BOOL_ATTR_RO(throttle_reason_vr_tdc, VR_TDC_MASK);
+
+static const struct attribute *freq_attrs[] = {
+ &dev_attr_punit_req_freq_mhz.attr,
+ &attr_throttle_reason_status.attr,
+ &attr_throttle_reason_pl1.attr,
+ &attr_throttle_reason_pl2.attr,
+ &attr_throttle_reason_pl4.attr,
+ &attr_throttle_reason_thermal.attr,
+ &attr_throttle_reason_prochot.attr,
+ &attr_throttle_reason_ratl.attr,
+ &attr_throttle_reason_vr_thermalert.attr,
+ &attr_throttle_reason_vr_tdc.attr,
+ NULL
+};
+
+static int intel_sysfs_rps_init(struct intel_gt *gt, struct kobject *kobj,
+ const struct attribute * const *attrs)
+{
+ int ret;
+
+ if (GRAPHICS_VER(gt->i915) < 6)
+ return 0;
+
+ ret = sysfs_create_files(kobj, attrs);
+ if (ret)
+ return ret;
+
+ if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915))
+ ret = sysfs_create_file(kobj, &dev_attr_vlv_rpe_freq_mhz.attr);
+
+ return ret;
+}
+
+void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
+{
+ int ret;
+
+ intel_sysfs_rc6_init(gt, kobj);
+
+ ret = is_object_gt(kobj) ?
+ intel_sysfs_rps_init(gt, kobj, gen6_rps_attrs) :
+ intel_sysfs_rps_init(gt, kobj, gen6_gt_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u RPS sysfs files (%pe)",
+ gt->info.id, ERR_PTR(ret));
+
+ /* end of the legacy interfaces */
+ if (!is_object_gt(kobj))
+ return;
+
+ ret = sysfs_create_files(kobj, freq_attrs);
+ if (ret)
+ drm_warn(&gt->i915->drm,
+ "failed to create gt%u throttle sysfs files (%pe)",
+ gt->info.id, ERR_PTR(ret));
+}
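
For illustration, a hedged userspace sketch of the resulting interface,
reading a legacy file next to its per-gt twin and one throttle reason
(the /sys/class/drm/card0 prefix is an assumption about the device node):

	/* Sketch: read back the files created above. */
	#include <stdio.h>

	static unsigned int read_u32(const char *path)
	{
		unsigned int val = 0;
		FILE *f = fopen(path, "r");

		if (!f)
			return 0;
		if (fscanf(f, "%u", &val) != 1)
			val = 0;
		fclose(f);
		return val;
	}

	int main(void)
	{
		printf("legacy max: %u MHz\n",
		       read_u32("/sys/class/drm/card0/gt_max_freq_mhz"));
		printf("gt0 max:    %u MHz\n",
		       read_u32("/sys/class/drm/card0/gt/gt0/rps_max_freq_mhz"));
		printf("gt0 PL1 throttled: %u\n",
		       read_u32("/sys/class/drm/card0/gt/gt0/throttle_reason_pl1"));
		return 0;
	}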
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.h
new file mode 100644
index 000000000000..f567105a4a89
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __SYSFS_GT_PM_H__
+#define __SYSFS_GT_PM_H__
+
+#include <linux/kobject.h>
+
+#include "intel_gt_types.h"
+
+void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj);
+
+#endif /* __SYSFS_GT_PM_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index f20687796490..b06611c1d4ad 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -16,10 +16,12 @@
#include <linux/workqueue.h>
#include "uc/intel_uc.h"
+#include "intel_gsc.h"
#include "i915_vma.h"
#include "intel_engine_types.h"
#include "intel_gt_buffer_pool_types.h"
+#include "intel_hwconfig.h"
#include "intel_llc_types.h"
#include "intel_reset_types.h"
#include "intel_rc6_types.h"
@@ -72,6 +74,7 @@ struct intel_gt {
struct i915_ggtt *ggtt;
struct intel_uc uc;
+ struct intel_gsc gsc;
struct mutex tlb_invalidate_lock;
@@ -182,7 +185,19 @@ struct intel_gt {
const struct intel_mmio_range *steering_table[NUM_STEERING_TYPES];
+ struct {
+ u8 groupid;
+ u8 instanceid;
+ } default_steering;
+
+ /*
+ * Base of the per-tile GTTMMADR, from which the MMIO and GGTT ranges are derived.
+ */
+ phys_addr_t phys_addr;
+
struct intel_gt_info {
+ unsigned int id;
+
intel_engine_mask_t engine_mask;
u32 l3bank_mask;
@@ -199,6 +214,9 @@ struct intel_gt {
struct sseu_dev_info sseu;
unsigned long mslice_mask;
+
+ /** @hwconfig: hardware configuration data */
+ struct intel_hwconfig hwconfig;
} info;
struct {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index cc53b481a1c5..b67831833c9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -109,32 +109,52 @@ int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object
return 0;
}
-void __i915_vm_close(struct i915_address_space *vm)
+static void clear_vm_list(struct list_head *list)
{
struct i915_vma *vma, *vn;
- if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
- return;
-
- list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
+ list_for_each_entry_safe(vma, vn, list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
- if (!kref_get_unless_zero(&obj->base.refcount)) {
+ if (!i915_gem_object_get_rcu(obj)) {
/*
- * Unbind the dying vma to ensure the bound_list
+ * Object is dying, but has not yet cleared its
+ * vma list.
+ * Unbind the dying vma to ensure our list
* is completely drained. We leave the destruction to
- * the object destructor.
+ * the object destructor to avoid the vma
+ * disappearing under it.
*/
atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
WARN_ON(__i915_vma_unbind(vma));
- continue;
+
+ /* Remove from the unbound list */
+ list_del_init(&vma->vm_link);
+
+ /*
+ * Delay the vm and vm mutex freeing until the
+ * object is done with destruction.
+ */
+ i915_vm_resv_get(vma->vm);
+ vma->vm_ddestroy = true;
+ } else {
+ i915_vma_destroy_locked(vma);
+ i915_gem_object_put(obj);
}
- /* Keep the obj (and hence the vma) alive as _we_ destroy it */
- i915_vma_destroy_locked(vma);
- i915_gem_object_put(obj);
}
+}
+
+static void __i915_vm_close(struct i915_address_space *vm)
+{
+ mutex_lock(&vm->mutex);
+
+ clear_vm_list(&vm->bound_list);
+ clear_vm_list(&vm->unbound_list);
+
+ /* Check for must-fix unanticipated side-effects */
GEM_BUG_ON(!list_empty(&vm->bound_list));
+ GEM_BUG_ON(!list_empty(&vm->unbound_list));
mutex_unlock(&vm->mutex);
}
@@ -156,7 +176,6 @@ int i915_vm_lock_objects(struct i915_address_space *vm,
void i915_address_space_fini(struct i915_address_space *vm)
{
drm_mm_takedown(&vm->mm);
- mutex_destroy(&vm->mutex);
}
/**
@@ -164,7 +183,8 @@ void i915_address_space_fini(struct i915_address_space *vm)
* @kref: Pointer to the &i915_address_space.resv_ref member.
*
* This function is called when the last lock sharer no longer shares the
- * &i915_address_space._resv lock.
+ * &i915_address_space._resv lock, and also from the vma destruction
+ * path if we raced while destroying a vma.
*/
void i915_vm_resv_release(struct kref *kref)
{
@@ -172,6 +192,8 @@ void i915_vm_resv_release(struct kref *kref)
container_of(kref, typeof(*vm), resv_ref);
dma_resv_fini(&vm->_resv);
+ mutex_destroy(&vm->mutex);
+
kfree(vm);
}
@@ -180,6 +202,8 @@ static void __i915_vm_release(struct work_struct *work)
struct i915_address_space *vm =
container_of(work, struct i915_address_space, release_work);
+ __i915_vm_close(vm);
+
/* Synchronize async unbinds. */
i915_vma_resource_bind_dep_sync_all(vm);
@@ -213,7 +237,6 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
vm->pending_unbind = RB_ROOT_CACHED;
INIT_WORK(&vm->release_work, __i915_vm_release);
- atomic_set(&vm->open, 1);
/*
* The vm->mutex must be reclaim safe (for use in the shrinker).
@@ -258,6 +281,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
INIT_LIST_HEAD(&vm->bound_list);
+ INIT_LIST_HEAD(&vm->unbound_list);
}
void *__px_vaddr(struct drm_i915_gem_object *p)
@@ -286,7 +310,7 @@ fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
void *vaddr = __px_vaddr(p);
memset64(vaddr, val, count);
- clflush_cache_range(vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(vaddr, PAGE_SIZE);
}
static void poison_scratch_page(struct drm_i915_gem_object *scratch)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index ba7025388a5e..a40d928b3888 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -240,15 +240,6 @@ struct i915_address_space {
unsigned int bind_async_flags;
- /*
- * Each active user context has its own address space (in full-ppgtt).
- * Since the vm may be shared between multiple contexts, we count how
- * many contexts keep us "open". Once open hits zero, we are closed
- * and do not allow any new attachments, and proceed to shutdown our
- * vma and page directories.
- */
- atomic_t open;
-
struct mutex mutex; /* protects vma and our lists */
struct kref resv_ref; /* kref to keep the reservation lock alive. */
@@ -263,6 +254,11 @@ struct i915_address_space {
*/
struct list_head bound_list;
+ /**
+ * List of vmas not yet bound or evicted.
+ */
+ struct list_head unbound_list;
+
/* Global GTT */
bool is_ggtt:1;
@@ -272,6 +268,9 @@ struct i915_address_space {
/* Some systems support read-only mappings for GGTT and/or PPGTT */
bool has_read_only:1;
+ /* Skip pte rewrite on unbind for suspend. Protected by @mutex */
+ bool skip_pte_rewrite:1;
+
u8 top;
u8 pd_shift;
u8 scratch_order;
@@ -448,6 +447,17 @@ i915_vm_get(struct i915_address_space *vm)
return vm;
}
+static inline struct i915_address_space *
+i915_vm_tryget(struct i915_address_space *vm)
+{
+ return kref_get_unless_zero(&vm->ref) ? vm : NULL;
+}
+
+static inline void assert_vm_alive(struct i915_address_space *vm)
+{
+ GEM_BUG_ON(!kref_read(&vm->ref));
+}
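
i915_vm_tryget() replaces the old open/tryopen counting with a plain
opportunistic reference; a minimal sketch of the intended pattern
(candidate_vm is a hypothetical pointer that may be racing with its
final put):

	/* Sketch: safely peek at a vm that may be in the middle of dying. */
	struct i915_address_space *vm;

	vm = i915_vm_tryget(candidate_vm);
	if (!vm)
		return -ENOENT; /* the last reference was already dropped */

	/* ... use vm under our own reference ... */

	i915_vm_put(vm);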
+
/**
* i915_vm_resv_get - Obtain a reference on the vm's reservation lock
* @vm: The vm whose reservation lock we want to share.
@@ -478,34 +488,6 @@ static inline void i915_vm_resv_put(struct i915_address_space *vm)
kref_put(&vm->resv_ref, i915_vm_resv_release);
}
-static inline struct i915_address_space *
-i915_vm_open(struct i915_address_space *vm)
-{
- GEM_BUG_ON(!atomic_read(&vm->open));
- atomic_inc(&vm->open);
- return i915_vm_get(vm);
-}
-
-static inline bool
-i915_vm_tryopen(struct i915_address_space *vm)
-{
- if (atomic_add_unless(&vm->open, 1, 0))
- return i915_vm_get(vm);
-
- return false;
-}
-
-void __i915_vm_close(struct i915_address_space *vm);
-
-static inline void
-i915_vm_close(struct i915_address_space *vm)
-{
- GEM_BUG_ON(!atomic_read(&vm->open));
- __i915_vm_close(vm);
-
- i915_vm_put(vm);
-}
-
void i915_address_space_init(struct i915_address_space *vm, int subclass);
void i915_address_space_fini(struct i915_address_space *vm);
@@ -567,6 +549,14 @@ i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
unsigned long lmem_pt_obj_flags);
+void intel_ggtt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ enum i915_cache_level cache_level,
+ u32 flags);
+void intel_ggtt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res);
+
int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
@@ -637,6 +627,7 @@ release_pd_entry(struct i915_page_directory * const pd,
struct i915_page_table * const pt,
const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
+void gen8_ggtt_invalidate(struct i915_ggtt *ggtt);
void ppgtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
diff --git a/drivers/gpu/drm/i915/gt/intel_hwconfig.h b/drivers/gpu/drm/i915/gt/intel_hwconfig.h
new file mode 100644
index 000000000000..322290780b67
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_hwconfig.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _INTEL_HWCONFIG_H_
+#define _INTEL_HWCONFIG_H_
+
+#include <linux/types.h>
+
+struct intel_gt;
+
+struct intel_hwconfig {
+ u32 size;
+ void *ptr;
+};
+
+int intel_gt_init_hwconfig(struct intel_gt *gt);
+void intel_gt_fini_hwconfig(struct intel_gt *gt);
+
+#endif /* _INTEL_HWCONFIG_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 07bef7128fdb..3f83a9038e13 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -778,7 +778,7 @@ static void init_common_regs(u32 * const regs,
CTX_CTRL_RS_CTX_ENABLE);
regs[CTX_CONTEXT_CONTROL] = ctl;
- regs[CTX_TIMESTAMP] = ce->runtime.last;
+ regs[CTX_TIMESTAMP] = ce->stats.runtime.last;
}
static void init_wa_bb_regs(u32 * const regs,
@@ -1208,6 +1208,10 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
IS_DG2_G11(ce->engine->i915))
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0);
+ /* hsdes: 1809175790 */
+ if (!HAS_FLAT_CCS(ce->engine->i915))
+ cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV);
+
return cs;
}
@@ -1225,6 +1229,14 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE,
0);
+ /* hsdes: 1809175790 */
+ if (!HAS_FLAT_CCS(ce->engine->i915)) {
+ if (ce->engine->class == VIDEO_DECODE_CLASS)
+ cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV);
+ else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS)
+ cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV);
+ }
+
return cs;
}
@@ -1722,11 +1734,12 @@ err:
}
}
-static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
+static void st_runtime_underflow(struct intel_context_stats *stats, s32 dt)
{
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
- ce->runtime.num_underflow++;
- ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
+ stats->runtime.num_underflow++;
+ stats->runtime.max_underflow =
+ max_t(u32, stats->runtime.max_underflow, -dt);
#endif
}
@@ -1743,25 +1756,25 @@ static u32 lrc_get_runtime(const struct intel_context *ce)
void lrc_update_runtime(struct intel_context *ce)
{
+ struct intel_context_stats *stats = &ce->stats;
u32 old;
s32 dt;
- if (intel_context_is_barrier(ce))
+ old = stats->runtime.last;
+ stats->runtime.last = lrc_get_runtime(ce);
+ dt = stats->runtime.last - old;
+ if (!dt)
return;
- old = ce->runtime.last;
- ce->runtime.last = lrc_get_runtime(ce);
- dt = ce->runtime.last - old;
-
if (unlikely(dt < 0)) {
CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
- old, ce->runtime.last, dt);
- st_update_runtime_underflow(ce, dt);
+ old, stats->runtime.last, dt);
+ st_runtime_underflow(stats, dt);
return;
}
- ewma_runtime_add(&ce->runtime.avg, dt);
- ce->runtime.total += dt;
+ ewma_runtime_add(&stats->runtime.avg, dt);
+ stats->runtime.total += dt;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index 6e4f9f58fca5..7371bb5c8129 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -11,9 +11,10 @@
#include <linux/bitfield.h>
#include <linux/types.h>
+#include "intel_context.h"
+
struct drm_i915_gem_object;
struct i915_gem_ww_ctx;
-struct intel_context;
struct intel_engine_cs;
struct intel_ring;
struct kref;
@@ -120,4 +121,28 @@ static inline u32 lrc_desc_priority(int prio)
return GEN12_CTX_PRIORITY_NORMAL;
}
+static inline void lrc_runtime_start(struct intel_context *ce)
+{
+ struct intel_context_stats *stats = &ce->stats;
+
+ if (intel_context_is_barrier(ce))
+ return;
+
+ if (stats->active)
+ return;
+
+ WRITE_ONCE(stats->active, intel_context_clock());
+}
+
+static inline void lrc_runtime_stop(struct intel_context *ce)
+{
+ struct intel_context_stats *stats = &ce->stats;
+
+ if (!stats->active)
+ return;
+
+ lrc_update_runtime(ce);
+ WRITE_ONCE(stats->active, 0);
+}
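
These helpers are meant to bracket a context's time on the hardware; a
sketch of the pairing (the scheduling code that actually calls them lives
outside this hunk):

	/* Sketch: account hardware execution time for a context. */
	lrc_runtime_start(ce);  /* stamps intel_context_clock() into stats->active */
	/* ... context executes on the engine ... */
	lrc_runtime_stop(ce);   /* updates ce->stats.runtime, clears active */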
+
#endif /* __INTEL_LRC_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 20444d6ceb3c..9d552f30b627 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -17,6 +17,8 @@ struct insert_pte_data {
#define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */
+#define GET_CCS_BYTES(i915, size) (HAS_FLAT_CCS(i915) ? \
+ DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE) : 0)
static bool engine_supports_migration(struct intel_engine_cs *engine)
{
if (!engine)
@@ -467,6 +469,123 @@ static bool wa_1209644611_applies(int ver, u32 size)
return height % 4 == 3 && height <= 8;
}
+/**
+ * DOC: Flat-CCS - Memory compression for Local memory
+ *
+ * On Xe-HP and later devices, we use dedicated compression control state (CCS)
+ * stored in local memory for each surface, to support the 3D and media
+ * compression formats.
+ *
+ * The memory required for the CCS of the entire local memory is 1/256 of
+ * the local memory size. So before the kernel boots, the required memory
+ * is reserved for the CCS data and a secure register is programmed with
+ * the CCS base address.
+ *
+ * Flat CCS data needs to be cleared when an lmem object is allocated,
+ * and CCS data can be copied in and out of the CCS region through
+ * XY_CTRL_SURF_COPY_BLT. The CPU can't access the CCS data directly.
+ *
+ * When lmem is exhausted, if the object's placements support smem, we
+ * can directly decompress the compressed lmem object into smem and
+ * start using it from smem itself.
+ *
+ * But when we need to swap out a compressed lmem object into smem even
+ * though the object's placements don't include smem, we copy the lmem
+ * content as-is into smem, along with the CCS data (using
+ * XY_CTRL_SURF_COPY_BLT). When the object is referenced again, the lmem
+ * content is swapped back in, along with restoration of the CCS data
+ * (using XY_CTRL_SURF_COPY_BLT) at the corresponding location.
+ */
+
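
As a quick check of the 1/256 ratio described in the DOC block above, a
minimal sketch (the 64 MiB object size is made up; i915 stands for the
device private as elsewhere in this file):

	/* Sketch: CCS footprint of a hypothetical 64 MiB lmem object. */
	u64 obj_size = SZ_64M;
	u64 ccs_bytes = GET_CCS_BYTES(i915, obj_size); /* 64 MiB / 256 = 256 KiB */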
+static inline u32 *i915_flush_dw(u32 *cmd, u32 flags)
+{
+ *cmd++ = MI_FLUSH_DW | flags;
+ *cmd++ = 0;
+ *cmd++ = 0;
+
+ return cmd;
+}
+
+static u32 calc_ctrl_surf_instr_size(struct drm_i915_private *i915, int size)
+{
+ u32 num_cmds, num_blks, total_size;
+
+ if (!GET_CCS_BYTES(i915, size))
+ return 0;
+
+ /*
+ * XY_CTRL_SURF_COPY_BLT transfers CCS in 256-byte
+ * blocks. One XY_CTRL_SURF_COPY_BLT command can
+ * transfer up to 1024 blocks.
+ */
+ num_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
+ NUM_CCS_BYTES_PER_BLOCK);
+ num_cmds = DIV_ROUND_UP(num_blks, NUM_CCS_BLKS_PER_XFER);
+ total_size = XY_CTRL_SURF_INSTR_SIZE * num_cmds;
+
+ /*
+ * Add a flush before and after the XY_CTRL_SURF_COPY_BLT.
+ */
+ total_size += 2 * MI_FLUSH_DW_SIZE;
+
+ return total_size;
+}
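
Working the sizing above through for the same 64 MiB example (illustrative
numbers):

	/*
	 * size = 64 MiB: GET_CCS_BYTES() = 256 KiB, so
	 * num_blks = 256 KiB / 256 B = 1024 and
	 * num_cmds = DIV_ROUND_UP(1024, 1024) = 1; total_size is one
	 * XY_CTRL_SURF_INSTR_SIZE plus 2 * MI_FLUSH_DW_SIZE.
	 */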
+
+static int emit_copy_ccs(struct i915_request *rq,
+ u32 dst_offset, u8 dst_access,
+ u32 src_offset, u8 src_access, int size)
+{
+ struct drm_i915_private *i915 = rq->engine->i915;
+ int mocs = rq->engine->gt->mocs.uc_index << 1;
+ u32 num_ccs_blks, ccs_ring_size;
+ u32 *cs;
+
+ ccs_ring_size = calc_ctrl_surf_instr_size(i915, size);
+ WARN_ON(!ccs_ring_size);
+
+ cs = intel_ring_begin(rq, round_up(ccs_ring_size, 2));
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ num_ccs_blks = DIV_ROUND_UP(GET_CCS_BYTES(i915, size),
+ NUM_CCS_BYTES_PER_BLOCK);
+ GEM_BUG_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER);
+ cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
+
+ /*
+ * The XY_CTRL_SURF_COPY_BLT instruction is used to copy the CCS
+ * data in and out of the CCS region.
+ *
+ * We can copy at most 1024 blocks of 256 bytes using one
+ * XY_CTRL_SURF_COPY_BLT instruction.
+ *
+ * In case we need to copy more than 1024 blocks, we need to add
+ * another instruction to the same batch buffer.
+ *
+ * 1024 blocks of 256 bytes of CCS represent a total of 256 KB of CCS.
+ *
+ * 256 KB of CCS represents 256 * 256 KB = 64 MB of LMEM.
+ */
+ *cs++ = XY_CTRL_SURF_COPY_BLT |
+ src_access << SRC_ACCESS_TYPE_SHIFT |
+ dst_access << DST_ACCESS_TYPE_SHIFT |
+ ((num_ccs_blks - 1) & CCS_SIZE_MASK) << CCS_SIZE_SHIFT;
+ *cs++ = src_offset;
+ *cs++ = rq->engine->instance |
+ FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
+ *cs++ = dst_offset;
+ *cs++ = rq->engine->instance |
+ FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, mocs);
+
+ cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
+ if (ccs_ring_size & 1)
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
static int emit_copy(struct i915_request *rq,
u32 dst_offset, u32 src_offset, int size)
{
@@ -514,6 +633,65 @@ static int emit_copy(struct i915_request *rq,
return 0;
}
+static int scatter_list_length(struct scatterlist *sg)
+{
+ int len = 0;
+
+ while (sg && sg_dma_len(sg)) {
+ len += sg_dma_len(sg);
+ sg = sg_next(sg);
+ }
+
+ return len;
+}
+
+static void
+calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
+ int *src_sz, int *ccs_sz, u32 bytes_to_cpy,
+ u32 ccs_bytes_to_cpy)
+{
+ if (ccs_bytes_to_cpy) {
+ /*
+ * We can only copy the CCS data corresponding to one
+ * CHUNK_SZ of lmem, which is GET_CCS_BYTES(i915, CHUNK_SZ).
+ */
+ *ccs_sz = min_t(int, ccs_bytes_to_cpy, GET_CCS_BYTES(i915, CHUNK_SZ));
+
+ if (!src_is_lmem)
+ /*
+ * When CHUNK_SZ is passed, all pages up to CHUNK_SZ
+ * will be taken for the blt. On Flat-CCS capable
+ * platforms the smem object will have more pages than
+ * required for main memory, so limit it to the size
+ * required for main memory.
+ */
+ *src_sz = min_t(int, bytes_to_cpy, CHUNK_SZ);
+ } else { /* ccs handling is not required */
+ *src_sz = CHUNK_SZ;
+ }
+}
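
Concretely (illustrative numbers): with CHUNK_SZ = SZ_8M, each pass moves
up to 8 MiB of main memory, so the per-chunk CCS cap works out to:

	GET_CCS_BYTES(i915, SZ_8M) = DIV_ROUND_UP(8 MiB, 256) = 32 KiB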
+
+static void get_ccs_sg_sgt(struct sgt_dma *it, u32 bytes_to_cpy)
+{
+ u32 len;
+
+ do {
+ GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
+ len = it->max - it->dma;
+ if (len > bytes_to_cpy) {
+ it->dma += bytes_to_cpy;
+ break;
+ }
+
+ bytes_to_cpy -= len;
+
+ it->sg = __sg_next(it->sg);
+ it->dma = sg_dma_address(it->sg);
+ it->max = it->dma + sg_dma_len(it->sg);
+ } while (bytes_to_cpy);
+}
+
int
intel_context_migrate_copy(struct intel_context *ce,
const struct i915_deps *deps,
@@ -525,17 +703,67 @@ intel_context_migrate_copy(struct intel_context *ce,
bool dst_is_lmem,
struct i915_request **out)
{
- struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst);
+ struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
+ struct drm_i915_private *i915 = ce->engine->i915;
+ u32 ccs_bytes_to_cpy = 0, bytes_to_cpy;
+ enum i915_cache_level ccs_cache_level;
+ int src_sz, dst_sz, ccs_sz;
+ u32 src_offset, dst_offset;
+ u8 src_access, dst_access;
struct i915_request *rq;
+ bool ccs_is_src;
int err;
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
+ GEM_BUG_ON(IS_DGFX(ce->engine->i915) && (!src_is_lmem && !dst_is_lmem));
*out = NULL;
GEM_BUG_ON(ce->ring->size < SZ_64K);
+ src_sz = scatter_list_length(src);
+ bytes_to_cpy = src_sz;
+
+ if (HAS_FLAT_CCS(i915) && src_is_lmem ^ dst_is_lmem) {
+ src_access = !src_is_lmem && dst_is_lmem;
+ dst_access = !src_access;
+
+ dst_sz = scatter_list_length(dst);
+ if (src_is_lmem) {
+ it_ccs = it_dst;
+ ccs_cache_level = dst_cache_level;
+ ccs_is_src = false;
+ } else if (dst_is_lmem) {
+ bytes_to_cpy = dst_sz;
+ it_ccs = it_src;
+ ccs_cache_level = src_cache_level;
+ ccs_is_src = true;
+ }
+
+ /*
+ * When eviction of the CCS data is needed, the smem object
+ * will have the extra pages for the CCS data.
+ *
+ * TO-DO: We want to turn the size mismatch check into a
+ * WARN_ON, but we still see some smem->lmem requests with
+ * matching sizes. This needs fixing.
+ */
+ ccs_bytes_to_cpy = src_sz != dst_sz ? GET_CCS_BYTES(i915, bytes_to_cpy) : 0;
+ if (ccs_bytes_to_cpy)
+ get_ccs_sg_sgt(&it_ccs, bytes_to_cpy);
+ }
+
+ src_offset = 0;
+ dst_offset = CHUNK_SZ;
+ if (HAS_64K_PAGES(ce->engine->i915)) {
+ src_offset = 0;
+ dst_offset = 0;
+ if (src_is_lmem)
+ src_offset = CHUNK_SZ;
+ if (dst_is_lmem)
+ dst_offset = 2 * CHUNK_SZ;
+ }
+
do {
- u32 src_offset, dst_offset;
int len;
rq = i915_request_create(ce);
@@ -563,22 +791,16 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
- src_offset = 0;
- dst_offset = CHUNK_SZ;
- if (HAS_64K_PAGES(ce->engine->i915)) {
- GEM_BUG_ON(!src_is_lmem && !dst_is_lmem);
-
- src_offset = 0;
- dst_offset = 0;
- if (src_is_lmem)
- src_offset = CHUNK_SZ;
- if (dst_is_lmem)
- dst_offset = 2 * CHUNK_SZ;
- }
+ calculate_chunk_sz(i915, src_is_lmem, &src_sz, &ccs_sz,
+ bytes_to_cpy, ccs_bytes_to_cpy);
len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem,
- src_offset, CHUNK_SZ);
- if (len <= 0) {
+ src_offset, src_sz);
+ if (!len) {
+ err = -EINVAL;
+ goto out_rq;
+ }
+ if (len < 0) {
err = len;
goto out_rq;
}
@@ -596,7 +818,46 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
- err = emit_copy(rq, dst_offset, src_offset, len);
+ err = emit_copy(rq, dst_offset, src_offset, len);
+ if (err)
+ goto out_rq;
+
+ bytes_to_cpy -= len;
+
+ if (ccs_bytes_to_cpy) {
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ err = emit_pte(rq, &it_ccs, ccs_cache_level, false,
+ ccs_is_src ? src_offset : dst_offset,
+ ccs_sz);
+ if (err < 0)
+ goto out_rq;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ /*
+ * Use the max of src_sz and dst_sz, as we need to
+ * pass the lmem size corresponding to the CCS
+ * blocks we have to handle.
+ */
+ ccs_sz = max_t(int, ccs_is_src ? ccs_sz : src_sz,
+ ccs_is_src ? dst_sz : ccs_sz);
+
+ err = emit_copy_ccs(rq, dst_offset, dst_access,
+ src_offset, src_access, ccs_sz);
+ if (err)
+ goto out_rq;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ /* Converting back to ccs bytes */
+ ccs_sz = GET_CCS_BYTES(rq->engine->i915, ccs_sz);
+ ccs_bytes_to_cpy -= ccs_sz;
+ }
/* Arbitration is re-enabled between requests. */
out_rq:
@@ -604,9 +865,26 @@ out_rq:
i915_request_put(*out);
*out = i915_request_get(rq);
i915_request_add(rq);
- if (err || !it_src.sg || !sg_dma_len(it_src.sg))
+
+ if (err)
break;
+ if (!bytes_to_cpy && !ccs_bytes_to_cpy) {
+ if (src_is_lmem)
+ WARN_ON(it_src.sg && sg_dma_len(it_src.sg));
+ else
+ WARN_ON(it_dst.sg && sg_dma_len(it_dst.sg));
+ break;
+ }
+
+ if (WARN_ON(!it_src.sg || !sg_dma_len(it_src.sg) ||
+ !it_dst.sg || !sg_dma_len(it_dst.sg) ||
+ (ccs_bytes_to_cpy && (!it_ccs.sg ||
+ !sg_dma_len(it_ccs.sg))))) {
+ err = -EINVAL;
+ break;
+ }
+
cond_resched();
} while (1);
@@ -614,35 +892,65 @@ out_ce:
return err;
}
-static int emit_clear(struct i915_request *rq, u64 offset, int size, u32 value)
+static int emit_clear(struct i915_request *rq, u32 offset, int size,
+ u32 value, bool is_lmem)
{
- const int ver = GRAPHICS_VER(rq->engine->i915);
+ struct drm_i915_private *i915 = rq->engine->i915;
+ int mocs = rq->engine->gt->mocs.uc_index << 1;
+ const int ver = GRAPHICS_VER(i915);
+ int ring_sz;
u32 *cs;
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
- offset += (u64)rq->engine->instance << 32;
+ if (HAS_FLAT_CCS(i915) && ver >= 12)
+ ring_sz = XY_FAST_COLOR_BLT_DW;
+ else if (ver >= 8)
+ ring_sz = 8;
+ else
+ ring_sz = 6;
- cs = intel_ring_begin(rq, ver >= 8 ? 8 : 6);
+ cs = intel_ring_begin(rq, ring_sz);
if (IS_ERR(cs))
return PTR_ERR(cs);
- if (ver >= 8) {
+ if (HAS_FLAT_CCS(i915) && ver >= 12) {
+ *cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
+ (XY_FAST_COLOR_BLT_DW - 2);
+ *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) |
+ (PAGE_SIZE - 1);
+ *cs++ = 0;
+ *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+ *cs++ = offset;
+ *cs++ = rq->engine->instance;
+ *cs++ = !is_lmem << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
+ /* BG7 */
+ *cs++ = value;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ /* BG11 */
+ *cs++ = 0;
+ *cs++ = 0;
+ /* BG13 */
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ } else if (ver >= 8) {
*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
*cs++ = 0;
*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
+ *cs++ = offset;
+ *cs++ = rq->engine->instance;
*cs++ = value;
*cs++ = MI_NOOP;
} else {
- GEM_BUG_ON(upper_32_bits(offset));
*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
*cs++ = 0;
*cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
- *cs++ = lower_32_bits(offset);
+ *cs++ = offset;
*cs++ = value;
}
@@ -659,8 +967,10 @@ intel_context_migrate_clear(struct intel_context *ce,
u32 value,
struct i915_request **out)
{
+ struct drm_i915_private *i915 = ce->engine->i915;
struct sgt_dma it = sg_sgt(sg);
struct i915_request *rq;
+ u32 offset;
int err;
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
@@ -668,8 +978,11 @@ intel_context_migrate_clear(struct intel_context *ce,
GEM_BUG_ON(ce->ring->size < SZ_64K);
+ offset = 0;
+ if (HAS_64K_PAGES(i915) && is_lmem)
+ offset = CHUNK_SZ;
+
do {
- u32 offset;
int len;
rq = i915_request_create(ce);
@@ -697,10 +1010,6 @@ intel_context_migrate_clear(struct intel_context *ce,
if (err)
goto out_rq;
- offset = 0;
- if (HAS_64K_PAGES(ce->engine->i915) && is_lmem)
- offset = CHUNK_SZ;
-
len = emit_pte(rq, &it, cache_level, is_lmem, offset, CHUNK_SZ);
if (len <= 0) {
err = len;
@@ -711,7 +1020,22 @@ intel_context_migrate_clear(struct intel_context *ce,
if (err)
goto out_rq;
- err = emit_clear(rq, offset, len, value);
+ err = emit_clear(rq, offset, len, value, is_lmem);
+ if (err)
+ goto out_rq;
+
+ if (HAS_FLAT_CCS(i915) && is_lmem && !value) {
+ /*
+ * Copy the cleared memory content into the
+ * corresponding CCS surface.
+ */
+ err = emit_copy_ccs(rq, offset, INDIRECT_ACCESS, offset,
+ DIRECT_ACCESS, len);
+ if (err)
+ goto out_rq;
+ }
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
/* Arbitration is re-enabled between requests. */
out_rq:
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index d91e2beb7517..d8b94d638559 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -91,7 +91,7 @@ write_dma_entry(struct drm_i915_gem_object * const pdma,
u64 * const vaddr = __px_vaddr(pdma);
vaddr[idx] = encoded_entry;
- clflush_cache_range(&vaddr[idx], sizeof(u64));
+ drm_clflush_virt_range(&vaddr[idx], sizeof(u64));
}
void
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 63db136cbc27..b4770690e794 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -6,6 +6,7 @@
#include <linux/pm_runtime.h>
#include <linux/string_helpers.h>
+#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_vgpu.h"
@@ -325,9 +326,10 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
resource_size_t pcbr_offset;
pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
- pctx = i915_gem_object_create_stolen_for_preallocated(i915,
- pcbr_offset,
- pctx_size);
+ pctx = i915_gem_object_create_region_at(i915->mm.stolen_region,
+ pcbr_offset,
+ pctx_size,
+ 0);
if (IS_ERR(pctx))
return PTR_ERR(pctx);
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 6cecfdae07ad..f5111c0a0060 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -93,6 +93,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
struct intel_memory_region *mem;
resource_size_t min_page_size;
resource_size_t io_start;
+ resource_size_t io_size;
resource_size_t lmem_size;
int err;
@@ -122,9 +123,14 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
lmem_size = intel_uncore_read64(&i915->uncore, GEN12_GSMBASE);
}
+ if (i915->params.lmem_size > 0) {
+ lmem_size = min_t(resource_size_t, lmem_size,
+ mul_u32_u32(i915->params.lmem_size, SZ_1M));
+ }
io_start = pci_resource_start(pdev, 2);
- if (GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2)))
+ io_size = min(pci_resource_len(pdev, 2), lmem_size);
+ if (!io_size)
return ERR_PTR(-ENODEV);
min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
@@ -134,7 +140,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
lmem_size,
min_page_size,
io_start,
- lmem_size,
+ io_size,
INTEL_MEMORY_LOCAL,
0,
&intel_region_lmem_ops);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index a6ae213c7d89..5422a3b84bd4 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -772,14 +772,15 @@ static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
intel_engine_mask_t awake = 0;
enum intel_engine_id id;
+ /* For GuC mode, ensure submission is disabled before stopping ring */
+ intel_uc_reset_prepare(&gt->uc);
+
for_each_engine(engine, gt, id) {
if (intel_engine_pm_get_if_awake(engine))
awake |= engine->mask;
reset_prepare_engine(engine);
}
- intel_uc_reset_prepare(&gt->uc);
-
return awake;
}
@@ -1319,7 +1320,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
engine_mask &= gt->info.engine_mask;
if (flags & I915_ERROR_CAPTURE) {
- i915_capture_error_state(gt, engine_mask);
+ i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE);
intel_gt_clear_error_registers(gt, engine_mask);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 6d7ec3bf1f32..5423bfd301ad 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -767,7 +767,7 @@ static int mi_set_context(struct i915_request *rq,
if (GRAPHICS_VER(i915) == 7) {
if (num_engines) {
struct intel_engine_cs *signaller;
- i915_reg_t last_reg = {}; /* keep gcc quiet */
+ i915_reg_t last_reg = INVALID_MMIO_REG; /* keep gcc quiet */
*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
for_each_engine(signaller, engine->gt, id) {
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index a9c13b1a3018..3476a11f294c 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1070,24 +1070,67 @@ int intel_rps_set(struct intel_rps *rps, u8 val)
return 0;
}
-static void gen6_rps_init(struct intel_rps *rps)
+static u32 intel_rps_read_state_cap(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- u32 rp_state_cap = intel_rps_read_state_cap(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
- /* All of these values are in units of 50MHz */
+ if (IS_XEHPSDV(i915))
+ return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP);
+ else if (IS_GEN9_LP(i915))
+ return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
+ else
+ return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
+}
+
+/**
+ * gen6_rps_get_freq_caps - Get freq caps exposed by HW
+ * @rps: the intel_rps structure
+ * @caps: returned freq caps
+ *
+ * Returned "caps" frequencies should be converted to MHz using
+ * intel_gpu_freq()
+ */
+void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ u32 rp_state_cap;
+
+ rp_state_cap = intel_rps_read_state_cap(rps);
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
if (IS_GEN9_LP(i915)) {
- rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
- rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
- rps->min_freq = (rp_state_cap >> 0) & 0xff;
+ caps->rp0_freq = (rp_state_cap >> 16) & 0xff;
+ caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ caps->min_freq = (rp_state_cap >> 0) & 0xff;
} else {
- rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
- rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
- rps->min_freq = (rp_state_cap >> 16) & 0xff;
+ caps->rp0_freq = (rp_state_cap >> 0) & 0xff;
+ caps->rp1_freq = (rp_state_cap >> 8) & 0xff;
+ caps->min_freq = (rp_state_cap >> 16) & 0xff;
}
+ if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
+ /*
+ * In this case rp_state_cap register reports frequencies in
+ * units of 50 MHz. Convert these to the actual "hw unit", i.e.
+ * units of 16.67 MHz
+ */
+ caps->rp0_freq *= GEN9_FREQ_SCALER;
+ caps->rp1_freq *= GEN9_FREQ_SCALER;
+ caps->min_freq *= GEN9_FREQ_SCALER;
+ }
+}
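
To make the unit handling concrete (GEN9_FREQ_SCALER is 3 on these
platforms, so the 50 MHz register units become 16.67 MHz hw units):

	/*
	 * Worked example: a raw RP0 field of 22 encodes 22 * 50 MHz =
	 * 1100 MHz; it is stored as 22 * 3 = 66 hw units, and
	 * intel_gpu_freq() converts 66 * 16.67 MHz back to ~1100 MHz.
	 */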
+
+static void gen6_rps_init(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_rps_freq_caps caps;
+
+ gen6_rps_get_freq_caps(rps, &caps);
+ rps->rp0_freq = caps.rp0_freq;
+ rps->rp1_freq = caps.rp1_freq;
+ rps->min_freq = caps.min_freq;
+
/* hw_max = RP0 until we check for overclocking */
rps->max_freq = rps->rp0_freq;
@@ -1095,26 +1138,18 @@ static void gen6_rps_init(struct intel_rps *rps)
if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
u32 ddcc_status = 0;
+ u32 mult = 1;
+ if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11)
+ mult = GEN9_FREQ_SCALER;
if (snb_pcode_read(i915, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status, NULL) == 0)
rps->efficient_freq =
- clamp_t(u8,
- (ddcc_status >> 8) & 0xff,
+ clamp_t(u32,
+ ((ddcc_status >> 8) & 0xff) * mult,
rps->min_freq,
rps->max_freq);
}
-
- if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
- /* Store the frequency values in 16.66 MHZ units, which is
- * the natural hardware unit for SKL
- */
- rps->rp0_freq *= GEN9_FREQ_SCALER;
- rps->rp1_freq *= GEN9_FREQ_SCALER;
- rps->min_freq *= GEN9_FREQ_SCALER;
- rps->max_freq *= GEN9_FREQ_SCALER;
- rps->efficient_freq *= GEN9_FREQ_SCALER;
- }
}
static bool rps_reset(struct intel_rps *rps)
@@ -2219,19 +2254,6 @@ int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
return set_min_freq(rps, val);
}
-u32 intel_rps_read_state_cap(struct intel_rps *rps)
-{
- struct drm_i915_private *i915 = rps_to_i915(rps);
- struct intel_uncore *uncore = rps_to_uncore(rps);
-
- if (IS_XEHPSDV(i915))
- return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP);
- else if (IS_GEN9_LP(i915))
- return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
- else
- return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
-}
-
static void intel_rps_set_manual(struct intel_rps *rps, bool enable)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
@@ -2244,18 +2266,18 @@ static void intel_rps_set_manual(struct intel_rps *rps, bool enable)
void intel_rps_raise_unslice(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
- u32 rp0_unslice_req;
mutex_lock(&rps->lock);
if (rps_uses_slpc(rps)) {
/* RP limits have not been initialized yet for SLPC path */
- rp0_unslice_req = ((intel_rps_read_state_cap(rps) >> 0)
- & 0xff) * GEN9_FREQ_SCALER;
+ struct intel_rps_freq_caps caps;
+
+ gen6_rps_get_freq_caps(rps, &caps);
intel_rps_set_manual(rps, true);
intel_uncore_write(uncore, GEN6_RPNSWREQ,
- ((rp0_unslice_req <<
+ ((caps.rp0_freq <<
GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
GEN9_IGNORE_SLICE_RATIO));
intel_rps_set_manual(rps, false);
@@ -2269,18 +2291,18 @@ void intel_rps_raise_unslice(struct intel_rps *rps)
void intel_rps_lower_unslice(struct intel_rps *rps)
{
struct intel_uncore *uncore = rps_to_uncore(rps);
- u32 rpn_unslice_req;
mutex_lock(&rps->lock);
if (rps_uses_slpc(rps)) {
/* RP limits have not been initialized yet for SLPC path */
- rpn_unslice_req = ((intel_rps_read_state_cap(rps) >> 16)
- & 0xff) * GEN9_FREQ_SCALER;
+ struct intel_rps_freq_caps caps;
+
+ gen6_rps_get_freq_caps(rps, &caps);
intel_rps_set_manual(rps, true);
intel_uncore_write(uncore, GEN6_RPNSWREQ,
- ((rpn_unslice_req <<
+ ((caps.min_freq <<
GEN9_SW_REQ_UNSLICE_RATIO_SHIFT) |
GEN9_IGNORE_SLICE_RATIO));
intel_rps_set_manual(rps, false);
@@ -2291,6 +2313,24 @@ void intel_rps_lower_unslice(struct intel_rps *rps)
mutex_unlock(&rps->lock);
}
+static u32 rps_read_mmio(struct intel_rps *rps, i915_reg_t reg32)
+{
+ struct intel_gt *gt = rps_to_gt(rps);
+ intel_wakeref_t wakeref;
+ u32 val;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ val = intel_uncore_read(gt->uncore, reg32);
+
+ return val;
+}
+
+bool rps_read_mask_mmio(struct intel_rps *rps,
+ i915_reg_t reg32, u32 mask)
+{
+ return rps_read_mmio(rps, reg32) & mask;
+}
+
/* External interface for intel_ips.ko */
static struct drm_i915_private __rcu *ips_mchdev;
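A minimal sketch of how a caller might consume the new helper, assuming the
existing intel_gpu_freq() conversion from "hw units" to MHz (the function
name below is illustrative, not part of this series):

static void example_print_freq_caps(struct intel_rps *rps,
				    struct drm_printer *p)
{
	struct intel_rps_freq_caps caps;

	/* read RP0/RP1/RPn once, already normalized to hw units */
	gen6_rps_get_freq_caps(rps, &caps);
	drm_printf(p, "RP0: %d MHz\n", intel_gpu_freq(rps, caps.rp0_freq));
	drm_printf(p, "RP1: %d MHz\n", intel_gpu_freq(rps, caps.rp1_freq));
	drm_printf(p, "RPn: %d MHz\n", intel_gpu_freq(rps, caps.min_freq));
}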
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index c6d76a3d1331..1e8d56491308 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -7,6 +7,7 @@
#define INTEL_RPS_H
#include "intel_rps_types.h"
+#include "i915_reg_defs.h"
struct i915_request;
@@ -44,10 +45,13 @@ u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
u32 intel_rps_get_rpn_frequency(struct intel_rps *rps);
u32 intel_rps_read_punit_req(struct intel_rps *rps);
u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps);
-u32 intel_rps_read_state_cap(struct intel_rps *rps);
+void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps);
void intel_rps_raise_unslice(struct intel_rps *rps);
void intel_rps_lower_unslice(struct intel_rps *rps);
+u32 intel_rps_read_throttle_reason(struct intel_rps *rps);
+bool rps_read_mask_mmio(struct intel_rps *rps, i915_reg_t reg32, u32 mask);
+
void gen5_rps_irq_handler(struct intel_rps *rps);
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h
index 3941d8551f52..9173ec75f2b8 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h
@@ -37,6 +37,21 @@ enum {
INTEL_RPS_TIMER,
};
+/**
+ * struct intel_rps_freq_caps - rps freq capabilities
+ * @rp0_freq: non-overclocked max frequency
+ * @rp1_freq: "less than" RP0 power/frequency
+ * @min_freq: aka RPn, minimum frequency
+ *
+ * Freq caps exposed by HW, values are in "hw units" and intel_gpu_freq()
+ * should be used to convert to MHz
+ */
+struct intel_rps_freq_caps {
+ u8 rp0_freq;
+ u8 rp1_freq;
+ u8 min_freq;
+};
+
struct intel_rps {
struct mutex lock; /* protects enabling and the worker */
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 614915ffbd37..9881a6790574 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -10,6 +10,8 @@
#include "intel_gt_regs.h"
#include "intel_sseu.h"
+#include "linux/string_helpers.h"
+
void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices,
u8 max_subslices, u8 max_eus_per_subslice)
{
@@ -35,8 +37,8 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
}
static u32
-_intel_sseu_get_subslices(const struct sseu_dev_info *sseu,
- const u8 *subslice_mask, u8 slice)
+sseu_get_subslices(const struct sseu_dev_info *sseu,
+ const u8 *subslice_mask, u8 slice)
{
int i, offset = slice * sseu->ss_stride;
u32 mask = 0;
@@ -51,12 +53,17 @@ _intel_sseu_get_subslices(const struct sseu_dev_info *sseu,
u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice)
{
- return _intel_sseu_get_subslices(sseu, sseu->subslice_mask, slice);
+ return sseu_get_subslices(sseu, sseu->subslice_mask, slice);
+}
+
+static u32 sseu_get_geometry_subslices(const struct sseu_dev_info *sseu)
+{
+ return sseu_get_subslices(sseu, sseu->geometry_subslice_mask, 0);
}
u32 intel_sseu_get_compute_subslices(const struct sseu_dev_info *sseu)
{
- return _intel_sseu_get_subslices(sseu, sseu->compute_subslice_mask, 0);
+ return sseu_get_subslices(sseu, sseu->compute_subslice_mask, 0);
}
void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
@@ -720,16 +727,11 @@ void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
str_yes_no(sseu->has_eu_pg));
}
-void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
- struct drm_printer *p)
+static void sseu_print_hsw_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p)
{
int s, ss;
- if (sseu->max_slices == 0) {
- drm_printf(p, "Unavailable\n");
- return;
- }
-
for (s = 0; s < sseu->max_slices; s++) {
drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n",
s, intel_sseu_subslices_per_slice(sseu, s),
@@ -744,6 +746,36 @@ void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
}
}
+static void sseu_print_xehp_topology(const struct sseu_dev_info *sseu,
+ struct drm_printer *p)
+{
+ u32 g_dss_mask = sseu_get_geometry_subslices(sseu);
+ u32 c_dss_mask = intel_sseu_get_compute_subslices(sseu);
+ int dss;
+
+ for (dss = 0; dss < sseu->max_subslices; dss++) {
+ u16 enabled_eus = sseu_get_eus(sseu, 0, dss);
+
+ drm_printf(p, "DSS_%02d: G:%3s C:%3s, %2u EUs (0x%04hx)\n", dss,
+ str_yes_no(g_dss_mask & BIT(dss)),
+ str_yes_no(c_dss_mask & BIT(dss)),
+ hweight16(enabled_eus), enabled_eus);
+ }
+}
+
+void intel_sseu_print_topology(struct drm_i915_private *i915,
+ const struct sseu_dev_info *sseu,
+ struct drm_printer *p)
+{
+ if (sseu->max_slices == 0) {
+ drm_printf(p, "Unavailable\n");
+ } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ sseu_print_xehp_topology(sseu, p);
+ } else {
+ sseu_print_hsw_topology(sseu, p);
+ }
+}
+
u16 intel_slicemask_from_dssmask(u64 dss_mask, int dss_per_slice)
{
u16 slice_mask = 0;
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index 8a79cd8eaab4..5c078df4729c 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -15,26 +15,49 @@ struct drm_i915_private;
struct intel_gt;
struct drm_printer;
-#define GEN_MAX_SLICES (3) /* SKL upper bound */
-#define GEN_MAX_SUBSLICES (32) /* XEHPSDV upper bound */
-#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE)
-#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES)
-#define GEN_MAX_EUS (16) /* TGL upper bound */
-#define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS)
+/*
+ * Maximum number of slices on older platforms. Slices no longer exist
+ * starting on Xe_HP ("gslices," "cslices," etc. are a different concept and
+ * are not expressed through fusing).
+ */
+#define GEN_MAX_HSW_SLICES 3
+
+/*
+ * Maximum number of subslices that can exist within a HSW-style slice. This
+ * is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the
+ * GEN_MAX_DSS value below).
+ */
+#define GEN_MAX_SS_PER_HSW_SLICE 6
+
+/* Maximum number of DSS on newer platforms (Xe_HP and beyond). */
+#define GEN_MAX_DSS 32
+
+/* Maximum number of EUs that can exist within a subslice or DSS. */
+#define GEN_MAX_EUS_PER_SS 16
+
+#define SSEU_MAX(a, b) ((a) > (b) ? (a) : (b))
+
+/* The maximum number of bits needed to express each subslice/DSS independently */
+#define GEN_SS_MASK_SIZE SSEU_MAX(GEN_MAX_DSS, \
+ GEN_MAX_HSW_SLICES * GEN_MAX_SS_PER_HSW_SLICE)
+
+#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE)
+#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_SS_MASK_SIZE)
+#define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS_PER_SS)
#define GEN_DSS_PER_GSLICE 4
#define GEN_DSS_PER_CSLICE 8
#define GEN_DSS_PER_MSLICE 8
-#define GEN_MAX_GSLICES (GEN_MAX_SUBSLICES / GEN_DSS_PER_GSLICE)
-#define GEN_MAX_CSLICES (GEN_MAX_SUBSLICES / GEN_DSS_PER_CSLICE)
+#define GEN_MAX_GSLICES (GEN_MAX_DSS / GEN_DSS_PER_GSLICE)
+#define GEN_MAX_CSLICES (GEN_MAX_DSS / GEN_DSS_PER_CSLICE)
struct sseu_dev_info {
u8 slice_mask;
- u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
- u8 geometry_subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
- u8 compute_subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
- u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES * GEN_MAX_EU_STRIDE];
+ u8 subslice_mask[GEN_SS_MASK_SIZE];
+ u8 geometry_subslice_mask[GEN_SS_MASK_SIZE];
+ u8 compute_subslice_mask[GEN_SS_MASK_SIZE];
+ u8 eu_mask[GEN_SS_MASK_SIZE * GEN_MAX_EU_STRIDE];
u16 eu_total;
u8 eu_per_subslice;
u8 min_eu_in_pool;
@@ -116,7 +139,8 @@ u32 intel_sseu_make_rpcs(struct intel_gt *gt,
const struct intel_sseu *req_sseu);
void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p);
-void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
+void intel_sseu_print_topology(struct drm_i915_private *i915,
+ const struct sseu_dev_info *sseu,
struct drm_printer *p);
u16 intel_slicemask_from_dssmask(u64 dss_mask, int dss_per_slice);
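With the constants above, the unified mask sizing works out as follows (a
worked example, not additional code in this series):

	GEN_SS_MASK_SIZE        = max(32, 3 * 6)      = 32
	GEN_MAX_SUBSLICE_STRIDE = DIV_ROUND_UP(32, 8) = 4
	GEN_MAX_EU_STRIDE       = DIV_ROUND_UP(16, 8) = 2

so each of subslice_mask[], geometry_subslice_mask[] and
compute_subslice_mask[] is 32 bytes, and eu_mask[] is 32 * 2 = 64 bytes,
which is large enough for either the HSW-style slice/subslice layout or
the Xe_HP DSS layout.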
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
index a9d5bc49f361..2d5d011e01db 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
@@ -248,7 +248,7 @@ int intel_sseu_status(struct seq_file *m, struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
const struct intel_gt_info *info = &gt->info;
- struct sseu_dev_info sseu;
+ struct sseu_dev_info *sseu;
intel_wakeref_t wakeref;
if (GRAPHICS_VER(i915) < 8)
@@ -258,23 +258,29 @@ int intel_sseu_status(struct seq_file *m, struct intel_gt *gt)
i915_print_sseu_info(m, true, HAS_POOLED_EU(i915), &info->sseu);
seq_puts(m, "SSEU Device Status\n");
- memset(&sseu, 0, sizeof(sseu));
- intel_sseu_set_info(&sseu, info->sseu.max_slices,
+
+ sseu = kzalloc(sizeof(*sseu), GFP_KERNEL);
+ if (!sseu)
+ return -ENOMEM;
+
+ intel_sseu_set_info(sseu, info->sseu.max_slices,
info->sseu.max_subslices,
info->sseu.max_eus_per_subslice);
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
if (IS_CHERRYVIEW(i915))
- cherryview_sseu_device_status(gt, &sseu);
+ cherryview_sseu_device_status(gt, sseu);
else if (IS_BROADWELL(i915))
- bdw_sseu_device_status(gt, &sseu);
+ bdw_sseu_device_status(gt, sseu);
else if (GRAPHICS_VER(i915) == 9)
- gen9_sseu_device_status(gt, &sseu);
+ gen9_sseu_device_status(gt, sseu);
else if (GRAPHICS_VER(i915) >= 11)
- gen11_sseu_device_status(gt, &sseu);
+ gen11_sseu_device_status(gt, sseu);
}
- i915_print_sseu_info(m, false, HAS_POOLED_EU(i915), &sseu);
+ i915_print_sseu_info(m, false, HAS_POOLED_EU(i915), sseu);
+
+ kfree(sseu);
return 0;
}
@@ -287,22 +293,22 @@ static int sseu_status_show(struct seq_file *m, void *unused)
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(sseu_status);
-static int rcs_topology_show(struct seq_file *m, void *unused)
+static int sseu_topology_show(struct seq_file *m, void *unused)
{
struct intel_gt *gt = m->private;
struct drm_printer p = drm_seq_file_printer(m);
- intel_sseu_print_topology(&gt->info.sseu, &p);
+ intel_sseu_print_topology(gt->i915, &gt->info.sseu, &p);
return 0;
}
-DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rcs_topology);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(sseu_topology);
void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
static const struct intel_gt_debugfs_file files[] = {
{ "sseu_status", &sseu_status_fops, NULL },
- { "rcs_topology", &rcs_topology_fops, NULL },
+ { "sseu_topology", &sseu_topology_fops, NULL },
};
intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index c014b40d2e9f..a05c4b99b3fb 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1072,9 +1072,15 @@ static void __set_mcr_steering(struct i915_wa_list *wal,
static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
unsigned int slice, unsigned int subslice)
{
- drm_dbg(&gt->i915->drm, "MCR slice=0x%x, subslice=0x%x\n", slice, subslice);
+ struct drm_printer p = drm_debug_printer("MCR Steering:");
__set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);
+
+ gt->default_steering.groupid = slice;
+ gt->default_steering.instanceid = subslice;
+
+ if (drm_debug_enabled(DRM_UT_DRIVER))
+ intel_gt_report_steering(&p, gt, false);
}
static void
@@ -2188,11 +2194,15 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
*/
wa_write_or(wal, GEN7_FF_THREAD_MODE,
GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+ }
+ if (IS_ALDERLAKE_P(i915) || IS_DG2(i915) || IS_ALDERLAKE_S(i915) ||
+ IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/*
* Wa_1606700617:tgl,dg1,adl-p
* Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
* Wa_14010826681:tgl,dg1,rkl,adl-p
+ * Wa_18019627453:dg2
*/
wa_masked_en(wal,
GEN9_CS_DEBUG_MODE1,
@@ -2310,7 +2320,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
FF_DOP_CLOCK_GATE_DISABLE);
}
- if (IS_GRAPHICS_VER(i915, 9, 12)) {
+ if (HAS_PERCTX_PREEMPT_CTRL(i915)) {
/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
@@ -2618,6 +2628,11 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
GLOBAL_INVALIDATION_MODE);
}
+
+ if (IS_DG2(i915)) {
+ /* Wa_22014226127:dg2 */
+ wa_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
+ }
}
static void
@@ -2633,7 +2648,7 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
* to a single RCS/CCS engine's workaround list since
* they're reset as part of the general render domain reset.
*/
- if (engine->class == RENDER_CLASS)
+ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
general_render_compute_wa_init(engine, wal);
if (engine->class == RENDER_CLASS)
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 72d5faab8f9a..09f8cd2d0e2c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -1736,15 +1736,9 @@ static int live_preempt(void *arg)
enum intel_engine_id id;
int err = -ENOMEM;
- if (igt_spinner_init(&spin_hi, gt))
- return -ENOMEM;
-
- if (igt_spinner_init(&spin_lo, gt))
- goto err_spin_hi;
-
ctx_hi = kernel_context(gt->i915, NULL);
if (!ctx_hi)
- goto err_spin_lo;
+ return -ENOMEM;
ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(gt->i915, NULL);
@@ -1752,6 +1746,12 @@ static int live_preempt(void *arg)
goto err_ctx_hi;
ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+ if (igt_spinner_init(&spin_hi, gt))
+ goto err_ctx_lo;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_spin_hi;
+
for_each_engine(engine, gt, id) {
struct igt_live_test t;
struct i915_request *rq;
@@ -1761,14 +1761,14 @@ static int live_preempt(void *arg)
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
rq = spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_ctx_lo;
+ goto err_spin_lo;
}
i915_request_add(rq);
@@ -1777,7 +1777,7 @@ static int live_preempt(void *arg)
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
rq = spinner_create_request(&spin_hi, ctx_hi, engine,
@@ -1785,7 +1785,7 @@ static int live_preempt(void *arg)
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
- goto err_ctx_lo;
+ goto err_spin_lo;
}
i915_request_add(rq);
@@ -1794,7 +1794,7 @@ static int live_preempt(void *arg)
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
igt_spinner_end(&spin_hi);
@@ -1802,19 +1802,19 @@ static int live_preempt(void *arg)
if (igt_live_test_end(&t)) {
err = -EIO;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
}
err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
return err;
}
@@ -1828,20 +1828,20 @@ static int live_late_preempt(void *arg)
enum intel_engine_id id;
int err = -ENOMEM;
- if (igt_spinner_init(&spin_hi, gt))
- return -ENOMEM;
-
- if (igt_spinner_init(&spin_lo, gt))
- goto err_spin_hi;
-
ctx_hi = kernel_context(gt->i915, NULL);
if (!ctx_hi)
- goto err_spin_lo;
+ return -ENOMEM;
ctx_lo = kernel_context(gt->i915, NULL);
if (!ctx_lo)
goto err_ctx_hi;
+ if (igt_spinner_init(&spin_hi, gt))
+ goto err_ctx_lo;
+
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_spin_hi;
+
/* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
ctx_lo->sched.priority = 1;
@@ -1854,14 +1854,14 @@ static int live_late_preempt(void *arg)
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
rq = spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_ctx_lo;
+ goto err_spin_lo;
}
i915_request_add(rq);
@@ -1875,7 +1875,7 @@ static int live_late_preempt(void *arg)
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
- goto err_ctx_lo;
+ goto err_spin_lo;
}
i915_request_add(rq);
@@ -1898,19 +1898,19 @@ static int live_late_preempt(void *arg)
if (igt_live_test_end(&t)) {
err = -EIO;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
}
err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
err_spin_lo:
igt_spinner_fini(&spin_lo);
err_spin_hi:
igt_spinner_fini(&spin_hi);
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
return err;
err_wedged:
@@ -1918,7 +1918,7 @@ err_wedged:
igt_spinner_end(&spin_lo);
intel_gt_set_wedged(gt);
err = -EIO;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
struct preempt_client {
@@ -3382,12 +3382,9 @@ static int live_preempt_timeout(void *arg)
if (!intel_has_reset_engine(gt))
return 0;
- if (igt_spinner_init(&spin_lo, gt))
- return -ENOMEM;
-
ctx_hi = kernel_context(gt->i915, NULL);
if (!ctx_hi)
- goto err_spin_lo;
+ return -ENOMEM;
ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
ctx_lo = kernel_context(gt->i915, NULL);
@@ -3395,6 +3392,9 @@ static int live_preempt_timeout(void *arg)
goto err_ctx_hi;
ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+ if (igt_spinner_init(&spin_lo, gt))
+ goto err_ctx_lo;
+
for_each_engine(engine, gt, id) {
unsigned long saved_timeout;
struct i915_request *rq;
@@ -3406,21 +3406,21 @@ static int live_preempt_timeout(void *arg)
MI_NOOP); /* preemption disabled */
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_ctx_lo;
+ goto err_spin_lo;
}
i915_request_add(rq);
if (!igt_wait_for_spinner(&spin_lo, rq)) {
intel_gt_set_wedged(gt);
err = -EIO;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
rq = igt_request_alloc(ctx_hi, engine);
if (IS_ERR(rq)) {
igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
- goto err_ctx_lo;
+ goto err_spin_lo;
}
/* Flush the previous CS ack before changing timeouts */
@@ -3440,7 +3440,7 @@ static int live_preempt_timeout(void *arg)
intel_gt_set_wedged(gt);
i915_request_put(rq);
err = -ETIME;
- goto err_ctx_lo;
+ goto err_spin_lo;
}
igt_spinner_end(&spin_lo);
@@ -3448,12 +3448,12 @@ static int live_preempt_timeout(void *arg)
}
err = 0;
+err_spin_lo:
+ igt_spinner_fini(&spin_lo);
err_ctx_lo:
kernel_context_close(ctx_lo);
err_ctx_hi:
kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 21c29d315cc0..6ba52ef1acb8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1753,8 +1753,8 @@ static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
if (IS_ERR(ce))
return PTR_ERR(ce);
- ce->runtime.num_underflow = 0;
- ce->runtime.max_underflow = 0;
+ ce->stats.runtime.num_underflow = 0;
+ ce->stats.runtime.max_underflow = 0;
do {
unsigned int loop = 1024;
@@ -1792,11 +1792,11 @@ static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
intel_context_get_avg_runtime_ns(ce));
err = 0;
- if (ce->runtime.num_underflow) {
+ if (ce->stats.runtime.num_underflow) {
pr_err("%s: pphwsp underflow %u time(s), max %u cycles!\n",
engine->name,
- ce->runtime.num_underflow,
- ce->runtime.max_underflow);
+ ce->stats.runtime.num_underflow,
+ ce->stats.runtime.max_underflow);
GEM_TRACE_DUMP();
err = -EOVERFLOW;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index c9c4f391c5cc..2b0c87999949 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -132,6 +132,124 @@ err_free_src:
return err;
}
+static int intel_context_copy_ccs(struct intel_context *ce,
+ const struct i915_deps *deps,
+ struct scatterlist *sg,
+ enum i915_cache_level cache_level,
+ bool write_to_ccs,
+ struct i915_request **out)
+{
+ u8 src_access = write_to_ccs ? DIRECT_ACCESS : INDIRECT_ACCESS;
+ u8 dst_access = write_to_ccs ? INDIRECT_ACCESS : DIRECT_ACCESS;
+ struct sgt_dma it = sg_sgt(sg);
+ struct i915_request *rq;
+ u32 offset;
+ int err;
+
+ GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
+ *out = NULL;
+
+ GEM_BUG_ON(ce->ring->size < SZ_64K);
+
+ offset = 0;
+ if (HAS_64K_PAGES(ce->engine->i915))
+ offset = CHUNK_SZ;
+
+ do {
+ int len;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_ce;
+ }
+
+ if (deps) {
+ err = i915_request_await_deps(rq, deps);
+ if (err)
+ goto out_rq;
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto out_rq;
+ }
+
+ deps = NULL;
+ }
+
+ /* The PTE updates + copy must not be interrupted. */
+ err = emit_no_arbitration(rq);
+ if (err)
+ goto out_rq;
+
+ len = emit_pte(rq, &it, cache_level, true, offset, CHUNK_SZ);
+ if (len <= 0) {
+ err = len;
+ goto out_rq;
+ }
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto out_rq;
+
+ err = emit_copy_ccs(rq, offset, dst_access,
+ offset, src_access, len);
+ if (err)
+ goto out_rq;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+
+ /* Arbitration is re-enabled between requests. */
+out_rq:
+ if (*out)
+ i915_request_put(*out);
+ *out = i915_request_get(rq);
+ i915_request_add(rq);
+ if (err || !it.sg || !sg_dma_len(it.sg))
+ break;
+
+ cond_resched();
+ } while (1);
+
+out_ce:
+ return err;
+}
+
+static int
+intel_migrate_ccs_copy(struct intel_migrate *m,
+ struct i915_gem_ww_ctx *ww,
+ const struct i915_deps *deps,
+ struct scatterlist *sg,
+ enum i915_cache_level cache_level,
+ bool write_to_ccs,
+ struct i915_request **out)
+{
+ struct intel_context *ce;
+ int err;
+
+ *out = NULL;
+ if (!m->context)
+ return -ENODEV;
+
+ ce = intel_migrate_create_context(m);
+ if (IS_ERR(ce))
+ ce = intel_context_get(m->context);
+ GEM_BUG_ON(IS_ERR(ce));
+
+ err = intel_context_pin_ww(ce, ww);
+ if (err)
+ goto out;
+
+ err = intel_context_copy_ccs(ce, deps, sg, cache_level,
+ write_to_ccs, out);
+
+ intel_context_unpin(ce);
+out:
+ intel_context_put(ce);
+ return err;
+}
+
static int clear(struct intel_migrate *migrate,
int (*fn)(struct intel_migrate *migrate,
struct i915_gem_ww_ctx *ww,
@@ -144,7 +262,8 @@ static int clear(struct intel_migrate *migrate,
struct drm_i915_gem_object *obj;
struct i915_request *rq;
struct i915_gem_ww_ctx ww;
- u32 *vaddr;
+ u32 *vaddr, val = 0;
+ bool ccs_cap = false;
int err = 0;
int i;
@@ -152,7 +271,15 @@ static int clear(struct intel_migrate *migrate,
if (IS_ERR(obj))
return 0;
+ /* Consider the rounded up memory too */
+ sz = obj->base.size;
+
+ if (HAS_FLAT_CCS(i915) && i915_gem_object_is_lmem(obj))
+ ccs_cap = true;
+
for_i915_gem_ww(&ww, err, true) {
+ int ccs_bytes, ccs_bytes_per_chunk;
+
err = i915_gem_object_lock(obj, &ww);
if (err)
continue;
@@ -167,44 +294,114 @@ static int clear(struct intel_migrate *migrate,
vaddr[i] = ~i;
i915_gem_object_flush_map(obj);
- err = fn(migrate, &ww, obj, sz, &rq);
- if (!err)
- continue;
+ if (ccs_cap && !val) {
+ /* Write the obj data into ccs surface */
+ err = intel_migrate_ccs_copy(migrate, &ww, NULL,
+ obj->mm.pages->sgl,
+ obj->cache_level,
+ true, &rq);
+ if (rq && !err) {
+ if (i915_request_wait(rq, 0, HZ) < 0) {
+ pr_err("%ps timed out, size: %u\n",
+ fn, sz);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+ rq = NULL;
+ }
+ if (err)
+ continue;
+ }
- if (err != -EDEADLK && err != -EINTR && err != -ERESTARTSYS)
- pr_err("%ps failed, size: %u\n", fn, sz);
- if (rq) {
- i915_request_wait(rq, 0, HZ);
+ err = fn(migrate, &ww, obj, val, &rq);
+ if (rq && !err) {
+ if (i915_request_wait(rq, 0, HZ) < 0) {
+ pr_err("%ps timed out, size: %u\n", fn, sz);
+ err = -ETIME;
+ }
i915_request_put(rq);
+ rq = NULL;
}
- i915_gem_object_unpin_map(obj);
- }
- if (err)
- goto err_out;
+ if (err)
+ continue;
- if (rq) {
- if (i915_request_wait(rq, 0, HZ) < 0) {
- pr_err("%ps timed out, size: %u\n", fn, sz);
- err = -ETIME;
+ i915_gem_object_flush_map(obj);
+
+ /* Verify the set/clear of the obj mem */
+ for (i = 0; !err && i < sz / PAGE_SIZE; i++) {
+ int x = i * 1024 +
+ i915_prandom_u32_max_state(1024, prng);
+
+ if (vaddr[x] != val) {
+ pr_err("%ps failed, (%u != %u), offset: %zu\n",
+ fn, vaddr[x], val, x * sizeof(u32));
+ igt_hexdump(vaddr + i * 1024, 4096);
+ err = -EINVAL;
+ }
}
- i915_request_put(rq);
- }
+ if (err)
+ continue;
- for (i = 0; !err && i < sz / PAGE_SIZE; i++) {
- int x = i * 1024 + i915_prandom_u32_max_state(1024, prng);
+ if (ccs_cap && !val) {
+ for (i = 0; i < sz / sizeof(u32); i++)
+ vaddr[i] = ~i;
+ i915_gem_object_flush_map(obj);
+
+ err = intel_migrate_ccs_copy(migrate, &ww, NULL,
+ obj->mm.pages->sgl,
+ obj->cache_level,
+ false, &rq);
+ if (rq && !err) {
+ if (i915_request_wait(rq, 0, HZ) < 0) {
+ pr_err("%ps timed out, size: %u\n",
+ fn, sz);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+ rq = NULL;
+ }
+ if (err)
+ continue;
+
+ ccs_bytes = GET_CCS_BYTES(i915, sz);
+ ccs_bytes_per_chunk = GET_CCS_BYTES(i915, CHUNK_SZ);
+ i915_gem_object_flush_map(obj);
+
+ for (i = 0; !err && i < DIV_ROUND_UP(ccs_bytes, PAGE_SIZE); i++) {
+ int offset = ((i * PAGE_SIZE) /
+ ccs_bytes_per_chunk) * CHUNK_SZ / sizeof(u32);
+ int ccs_bytes_left = (ccs_bytes - i * PAGE_SIZE) / sizeof(u32);
+ int x = i915_prandom_u32_max_state(min_t(int, 1024,
+ ccs_bytes_left), prng);
+
+ if (vaddr[offset + x]) {
+ pr_err("%ps ccs clearing failed, offset: %ld/%d\n",
+ fn, i * PAGE_SIZE + x * sizeof(u32), ccs_bytes);
+ igt_hexdump(vaddr + offset,
+ min_t(int, 4096,
+ ccs_bytes_left * sizeof(u32)));
+ err = -EINVAL;
+ }
+ }
+
+ if (err)
+ continue;
+ }
+ i915_gem_object_unpin_map(obj);
+ }
- if (vaddr[x] != sz) {
- pr_err("%ps failed, size: %u, offset: %zu\n",
- fn, sz, x * sizeof(u32));
- igt_hexdump(vaddr + i * 1024, 4096);
- err = -EINVAL;
+ if (err) {
+ if (err != -EDEADLK && err != -EINTR && err != -ERESTARTSYS)
+ pr_err("%ps failed, size: %u\n", fn, sz);
+ if (rq && err != -EINVAL) {
+ i915_request_wait(rq, 0, HZ);
+ i915_request_put(rq);
}
+
+ i915_gem_object_unpin_map(obj);
}
- i915_gem_object_unpin_map(obj);
-err_out:
i915_gem_object_put(obj);
-
return err;
}
@@ -621,13 +818,15 @@ static int perf_copy_blt(void *arg)
for (i = 0; i < ARRAY_SIZE(sizes); i++) {
struct drm_i915_gem_object *src, *dst;
+ size_t sz;
int err;
src = create_init_lmem_internal(gt, sizes[i], true);
if (IS_ERR(src))
return PTR_ERR(src);
- dst = create_init_lmem_internal(gt, sizes[i], false);
+ sz = src->base.size;
+ dst = create_init_lmem_internal(gt, sz, false);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto err_src;
@@ -640,7 +839,7 @@ static int perf_copy_blt(void *arg)
dst->mm.pages->sgl,
I915_CACHE_NONE,
i915_gem_object_is_lmem(dst),
- sizes[i]);
+ sz);
i915_gem_object_unlock(dst);
i915_gem_object_put(dst);
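In outline, the flat-CCS round trip exercised by the updated clear()
selftest is (a summary of the flow above, using the selftest's own
helpers):

/*
 *  1. Fill the object with the ~i pattern.
 *  2. intel_migrate_ccs_copy(..., write_to_ccs = true): main -> CCS.
 *  3. fn() clears (val == 0) or sets the main surface; verify vaddr[].
 *  4. Re-dirty the main surface, then
 *     intel_migrate_ccs_copy(..., write_to_ccs = false): CCS -> main.
 *  5. Verify the CCS-backed dwords read back as zero, i.e. clearing the
 *     object also cleared its compression metadata.
 */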
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index 7afdadc7656f..be9ac47fa9d0 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -122,17 +122,14 @@ enum intel_guc_action {
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
INTEL_GUC_ACTION_SCHED_ENGINE_MODE_SET = 0x1003,
INTEL_GUC_ACTION_SCHED_ENGINE_MODE_DONE = 0x1004,
- INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY = 0x1005,
- INTEL_GUC_ACTION_SET_CONTEXT_EXECUTION_QUANTUM = 0x1006,
- INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT = 0x1007,
INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008,
INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009,
+ INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES = 0x100B,
INTEL_GUC_ACTION_SETUP_PC_GUCRC = 0x3004,
INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
+ INTEL_GUC_ACTION_GET_HWCONFIG = 0x4100,
INTEL_GUC_ACTION_REGISTER_CONTEXT = 0x4502,
INTEL_GUC_ACTION_DEREGISTER_CONTEXT = 0x4503,
- INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
- INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
INTEL_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
@@ -173,4 +170,11 @@ enum intel_guc_sleep_state_status {
#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8)
+enum intel_guc_state_capture_event_status {
+ INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_SUCCESS = 0x0,
+ INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE = 0x1,
+};
+
+#define INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK 0x000000FF
+
#endif /* _ABI_GUC_ACTIONS_ABI_H */
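A hedged sketch of how a G2H handler might test the new capture-event
status; only the mask and enum come from this header, while the message
layout is an assumption for illustration:

static bool example_capture_overflowed(const u32 *msg)
{
	/* assumption: status lives in the low byte of the first dword */
	u32 status = msg[0] & INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;

	return status == INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE;
}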
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
index c20658ee85a5..8085fb181274 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
@@ -8,6 +8,10 @@
enum intel_guc_response_status {
INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
+ INTEL_GUC_RESPONSE_NOT_SUPPORTED = 0x20,
+ INTEL_GUC_RESPONSE_NO_ATTRIBUTE_TABLE = 0x201,
+ INTEL_GUC_RESPONSE_NO_DECRYPTION_KEY = 0x202,
+ INTEL_GUC_RESPONSE_DECRYPTION_FAILED = 0x204,
INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
};
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
index f0814a57c191..4a59478c3b5c 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -6,6 +6,8 @@
#ifndef _ABI_GUC_KLVS_ABI_H
#define _ABI_GUC_KLVS_ABI_H
+#include <linux/types.h>
+
/**
* DOC: GuC KLV
*
@@ -79,4 +81,17 @@
#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY 0x0907
#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_LEN 1u
+/*
+ * Per context scheduling policy update keys.
+ */
+enum {
+ GUC_CONTEXT_POLICIES_KLV_ID_EXECUTION_QUANTUM = 0x2001,
+ GUC_CONTEXT_POLICIES_KLV_ID_PREEMPTION_TIMEOUT = 0x2002,
+ GUC_CONTEXT_POLICIES_KLV_ID_SCHEDULING_PRIORITY = 0x2003,
+ GUC_CONTEXT_POLICIES_KLV_ID_PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY = 0x2004,
+ GUC_CONTEXT_POLICIES_KLV_ID_SLPM_GT_FREQUENCY = 0x2005,
+
+ GUC_CONTEXT_POLICIES_KLV_NUM_IDS = 5,
+};
+
#endif /* _ABI_GUC_KLVS_ABI_H */
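A minimal sketch of emitting one of the new per-context policy KLVs,
assuming the generic KLV layout documented in this file (key in bits
31:16, length in bits 15:0 of the leading dword); the helper name is
illustrative:

static u32 *example_emit_policy_klv(u32 *cs, u16 key, u32 value)
{
	*cs++ = FIELD_PREP(GUC_KLV_0_KEY, key) |
		FIELD_PREP(GUC_KLV_0_LEN, 1);	/* one dword of value */
	*cs++ = value;
	return cs;
}

e.g. example_emit_policy_klv(cs, GUC_CONTEXT_POLICIES_KLV_ID_PREEMPTION_TIMEOUT, usecs)
would queue a preemption-timeout update for the H2G policies action.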
diff --git a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
new file mode 100644
index 000000000000..3624abfd22d1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021-2022 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_CAPTURE_FWIF_H
+#define _INTEL_GUC_CAPTURE_FWIF_H
+
+#include <linux/types.h>
+#include "intel_guc_fwif.h"
+
+struct intel_guc;
+struct file;
+
+/**
+ * struct __guc_capture_bufstate
+ *
+ * Book-keeping structure used to track read and write pointers
+ * as we extract error capture data from the GuC-log-buffer's
+ * error-capture region as a stream of dwords.
+ */
+struct __guc_capture_bufstate {
+ u32 size;
+ void *data;
+ u32 rd;
+ u32 wr;
+};
+
+/**
+ * struct __guc_capture_parsed_output - extracted error capture node
+ *
+ * A single unit of extracted error-capture output data grouped together
+ * at an engine-instance level. We keep these nodes in a linked list.
+ * See cachelist and outlist below.
+ */
+struct __guc_capture_parsed_output {
+ /*
+ * A single set of 3 capture lists: a global list,
+ * an engine-class list and an engine-instance list.
+ * intel_guc_state_capture.outlist keeps a linked
+ * list of these nodes that will eventually be
+ * detached from outlist and attached to
+ * i915_gpu_coredump in response to a context reset.
+ */
+ struct list_head link;
+ bool is_partial;
+ u32 eng_class;
+ u32 eng_inst;
+ u32 guc_id;
+ u32 lrca;
+ struct gcap_reg_list_info {
+ u32 vfid;
+ u32 num_regs;
+ struct guc_mmio_reg *regs;
+ } reginfo[GUC_CAPTURE_LIST_TYPE_MAX];
+#define GCAP_PARSED_REGLIST_INDEX_GLOBAL BIT(GUC_CAPTURE_LIST_TYPE_GLOBAL)
+#define GCAP_PARSED_REGLIST_INDEX_ENGCLASS BIT(GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS)
+#define GCAP_PARSED_REGLIST_INDEX_ENGINST BIT(GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE)
+};
+
+/**
+ * struct guc_debug_capture_list_header / struct guc_debug_capture_list
+ *
+ * As part of ADS registration, these header structures (followed by
+ * an array of 'struct guc_mmio_reg' entries) are used to register with
+ * the GuC microkernel the list of registers we want it to dump out
+ * prior to an engine reset.
+ */
+struct guc_debug_capture_list_header {
+ u32 info;
+#define GUC_CAPTURELISTHDR_NUMDESCR GENMASK(15, 0)
+} __packed;
+
+struct guc_debug_capture_list {
+ struct guc_debug_capture_list_header header;
+ struct guc_mmio_reg regs[0];
+} __packed;
+
+/**
+ * struct __guc_mmio_reg_descr / struct __guc_mmio_reg_descr_group
+ *
+ * intel_guc_capture module uses these structures to maintain static
+ * tables (per unique platform) that consist of lists of registers
+ * (offsets, names, flags, ...) that are used at ADS registration
+ * time as well as during runtime processing and reporting of error-
+ * capture states generated by GuC just prior to engine reset events.
+ */
+struct __guc_mmio_reg_descr {
+ i915_reg_t reg;
+ u32 flags;
+ u32 mask;
+ const char *regname;
+};
+
+struct __guc_mmio_reg_descr_group {
+ const struct __guc_mmio_reg_descr *list;
+ u32 num_regs;
+ u32 owner; /* see enum guc_capture_owner */
+ u32 type; /* see enum guc_capture_type */
+ u32 engine; /* as per MAX_ENGINE_CLASS */
+ struct __guc_mmio_reg_descr *extlist; /* only used for steered registers */
+};
+
+/**
+ * struct guc_state_capture_header_t / struct guc_state_capture_t /
+ * guc_state_capture_group_header_t / guc_state_capture_group_t
+ *
+ * Prior to resetting engines that have hung or faulted, the GuC
+ * microkernel reports the engine error state (register values that
+ * were read) by logging them into the shared GuC log buffer using
+ * this hierarchy of structures.
+ */
+struct guc_state_capture_header_t {
+ u32 owner;
+#define CAP_HDR_CAPTURE_VFID GENMASK(7, 0)
+ u32 info;
+#define CAP_HDR_CAPTURE_TYPE GENMASK(3, 0) /* see enum guc_capture_type */
+#define CAP_HDR_ENGINE_CLASS GENMASK(7, 4) /* see GUC_MAX_ENGINE_CLASSES */
+#define CAP_HDR_ENGINE_INSTANCE GENMASK(11, 8)
+ u32 lrca; /* if type-instance, LRCA (address) that hung, else set to ~0 */
+ u32 guc_id; /* if type-instance, context index of hung context, else set to ~0 */
+ u32 num_mmios;
+#define CAP_HDR_NUM_MMIOS GENMASK(9, 0)
+} __packed;
+
+struct guc_state_capture_t {
+ struct guc_state_capture_header_t header;
+ struct guc_mmio_reg mmio_entries[0];
+} __packed;
+
+enum guc_capture_group_types {
+ GUC_STATE_CAPTURE_GROUP_TYPE_FULL,
+ GUC_STATE_CAPTURE_GROUP_TYPE_PARTIAL,
+ GUC_STATE_CAPTURE_GROUP_TYPE_MAX,
+};
+
+struct guc_state_capture_group_header_t {
+ u32 owner;
+#define CAP_GRP_HDR_CAPTURE_VFID GENMASK(7, 0)
+ u32 info;
+#define CAP_GRP_HDR_NUM_CAPTURES GENMASK(7, 0)
+#define CAP_GRP_HDR_CAPTURE_TYPE GENMASK(15, 8) /* guc_capture_group_types */
+} __packed;
+
+/* this is the top level structure where an error-capture dump starts */
+struct guc_state_capture_group_t {
+ struct guc_state_capture_group_header_t grp_header;
+ struct guc_state_capture_t capture_entries[0];
+} __packed;
+
+/**
+ * struct __guc_capture_ads_cache
+ *
+ * A structure to cache register lists that were populated and registered
+ * with GuC at startup during ADS registration. This allows much quicker
+ * GuC resets without re-parsing all the tables for the given gt.
+ */
+struct __guc_capture_ads_cache {
+ bool is_valid;
+ void *ptr;
+ size_t size;
+ int status;
+};
+
+/**
+ * struct intel_guc_state_capture
+ *
+ * Internal context of the intel_guc_capture module.
+ */
+struct intel_guc_state_capture {
+ /**
+ * @reglists: static table of register lists used for error-capture state.
+ */
+ const struct __guc_mmio_reg_descr_group *reglists;
+
+ /**
+ * @extlists: allocated table of steered register lists used for error-capture state.
+ *
+ * NOTE: steered registers have multiple instances depending on the HW configuration
+ * (slices or dual-sub-slices), so the lists depend on the HW fuses discovered at startup
+ */
+ struct __guc_mmio_reg_descr_group *extlists;
+
+ /**
+ * @ads_cache: cached register lists that are ADS-format ready
+ */
+ struct __guc_capture_ads_cache ads_cache[GUC_CAPTURE_LIST_INDEX_MAX]
+ [GUC_CAPTURE_LIST_TYPE_MAX]
+ [GUC_MAX_ENGINE_CLASSES];
+ void *ads_null_cache;
+
+ /**
+ * @cachelist: Pool of pre-allocated nodes for error capture output
+ *
+ * We need this pool of pre-allocated nodes because we cannot
+ * dynamically allocate new nodes when receiving the G2H notification
+ * because the event handlers for all G2H event-processing are called
+ * from the CT processing worker queue, and when that queue is being
+ * processed there is no absolute guarantee that we are not in the
+ * midst of a GT reset operation (which doesn't allow allocations).
+ */
+ struct list_head cachelist;
+#define PREALLOC_NODES_MAX_COUNT (3 * GUC_MAX_ENGINE_CLASSES * GUC_MAX_INSTANCES_PER_CLASS)
+#define PREALLOC_NODES_DEFAULT_NUMREGS 64
+ int max_mmio_per_node;
+
+ /**
+ * @outlist: Linked list of parsed error-capture output nodes
+ *
+ * A linked list of parsed GuC error-capture output data before it is
+ * reported and formatted via i915_gpu_coredump. Each node in this list
+ * contains a single engine capture, including the global, engine-class
+ * and engine-instance register dumps, as per __guc_capture_parsed_output.
+ */
+ struct list_head outlist;
+};
+
+#endif /* _INTEL_GUC_CAPTURE_FWIF_H */
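A short sketch of pulling fields out of a capture header with the masks
defined above (FIELD_GET() is from <linux/bitfield.h>; the helper name is
illustrative):

static void example_decode_capture_header(const struct guc_state_capture_header_t *hdr)
{
	u32 type = FIELD_GET(CAP_HDR_CAPTURE_TYPE, hdr->info);
	u32 class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr->info);
	u32 inst = FIELD_GET(CAP_HDR_ENGINE_INSTANCE, hdr->info);
	u32 mmios = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr->num_mmios);

	pr_debug("guc capture: type=%u class=%u inst=%u num_mmios=%u\n",
		 type, class, inst, mmios);
}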
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 447a976c9f25..2c4ad4a65089 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -9,8 +9,9 @@
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "intel_guc.h"
-#include "intel_guc_slpc.h"
#include "intel_guc_ads.h"
+#include "intel_guc_capture.h"
+#include "intel_guc_slpc.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
#include "i915_irq.h"
@@ -291,6 +292,41 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50))
flags |= GUC_WA_POLLCS;
+ /* Wa_16011759253:dg2_g10:a0 */
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
+ flags |= GUC_WA_GAM_CREDITS;
+
+ /* Wa_14014475959:dg2 */
+ if (IS_DG2(gt->i915))
+ flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
+
+ /*
+ * Wa_14012197797:dg2_g10:a0,dg2_g11:a0
+ * Wa_22011391025:dg2_g10,dg2_g11,dg2_g12
+ *
+ * The same WA bit is used for both and 22011391025 is applicable to
+ * all DG2.
+ */
+ if (IS_DG2(gt->i915))
+ flags |= GUC_WA_DUAL_QUEUE;
+
+ /* Wa_22011802037: graphics version 12 */
+ if (GRAPHICS_VER(gt->i915) == 12)
+ flags |= GUC_WA_PRE_PARSER;
+
+ /* Wa_16011777198:dg2 */
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0))
+ flags |= GUC_WA_RCS_RESET_BEFORE_RC6;
+
+ /*
+ * Wa_22012727170:dg2_g10[a0-c0), dg2_g11[a0..)
+ * Wa_22012727685:dg2_g11[a0..)
+ */
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
+ IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_FOREVER))
+ flags |= GUC_WA_CONTEXT_ISOLATION;
+
return flags;
}
@@ -362,9 +398,14 @@ int intel_guc_init(struct intel_guc *guc)
if (ret)
goto err_fw;
- ret = intel_guc_ads_create(guc);
+ ret = intel_guc_capture_init(guc);
if (ret)
goto err_log;
+
+ ret = intel_guc_ads_create(guc);
+ if (ret)
+ goto err_capture;
+
GEM_BUG_ON(!guc->ads_vma);
ret = intel_guc_ct_init(&guc->ct);
@@ -403,6 +444,8 @@ err_ct:
intel_guc_ct_fini(&guc->ct);
err_ads:
intel_guc_ads_destroy(guc);
+err_capture:
+ intel_guc_capture_destroy(guc);
err_log:
intel_guc_log_destroy(&guc->log);
err_fw:
@@ -430,6 +473,7 @@ void intel_guc_fini(struct intel_guc *guc)
intel_guc_ct_fini(&guc->ct);
intel_guc_ads_destroy(guc);
+ intel_guc_capture_destroy(guc);
intel_guc_log_destroy(&guc->log);
intel_uc_fw_fini(&guc->fw);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index bf7079480d47..3f3373f68123 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -10,18 +10,19 @@
#include <linux/iosys-map.h>
#include <linux/xarray.h>
-#include "intel_uncore.h"
+#include "intel_guc_ct.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
-#include "intel_guc_ct.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_guc_slpc_types.h"
#include "intel_uc_fw.h"
+#include "intel_uncore.h"
#include "i915_utils.h"
#include "i915_vma.h"
struct __guc_ads_blob;
+struct intel_guc_state_capture;
/**
* struct intel_guc - Top level structure of GuC.
@@ -38,6 +39,8 @@ struct intel_guc {
struct intel_guc_ct ct;
/** @slpc: sub-structure containing SLPC related data and objects */
struct intel_guc_slpc slpc;
+ /** @capture: the error-state-capture module's data and objects */
+ struct intel_guc_state_capture *capture;
/** @sched_engine: Global engine used to submit requests to GuC */
struct i915_sched_engine *sched_engine;
@@ -138,6 +141,8 @@ struct intel_guc {
bool submission_supported;
/** @submission_selected: tracks whether the user enabled GuC submission */
bool submission_selected;
+ /** @submission_initialized: tracks whether GuC submission has been initialised */
+ bool submission_initialized;
/**
* @rc_supported: tracks whether we support GuC rc on the current platform
*/
@@ -160,14 +165,11 @@ struct intel_guc {
struct guc_mmio_reg *ads_regset;
/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
u32 ads_golden_ctxt_size;
+ /** @ads_capture_size: size of register lists in the ADS used for error capture */
+ u32 ads_capture_size;
/** @ads_engine_usage_size: size of engine usage in the ADS */
u32 ads_engine_usage_size;
- /** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */
- struct i915_vma *lrc_desc_pool;
- /** @lrc_desc_pool_vaddr: contents of the GuC LRC descriptor pool */
- void *lrc_desc_pool_vaddr;
-
/**
* @context_lookup: used to resolve intel_context from guc_id, if a
* context is present in this structure it is registered with the GuC
@@ -431,6 +433,9 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
int intel_guc_error_capture_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len);
+struct intel_engine_cs *
+intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance);
+
void intel_guc_find_hung_context(struct intel_engine_cs *engine);
int intel_guc_global_policies_update(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 92cb88248391..3eabf4cf8eec 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -11,6 +11,7 @@
#include "gt/intel_lrc.h"
#include "gt/shmem_utils.h"
#include "intel_guc_ads.h"
+#include "intel_guc_capture.h"
#include "intel_guc_fwif.h"
#include "intel_uc.h"
#include "i915_drv.h"
@@ -86,8 +87,7 @@ static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc)
static u32 guc_ads_capture_size(struct intel_guc *guc)
{
- /* FIXME: Allocate a proper capture list */
- return PAGE_ALIGN(PAGE_SIZE);
+ return PAGE_ALIGN(guc->ads_capture_size);
}
static u32 guc_ads_private_data_size(struct intel_guc *guc)
@@ -276,15 +276,24 @@ __mmio_reg_add(struct temp_regset *regset, struct guc_mmio_reg *reg)
return slot;
}
-static long __must_check guc_mmio_reg_add(struct temp_regset *regset,
- u32 offset, u32 flags)
+#define GUC_REGSET_STEERING(group, instance) ( \
+ FIELD_PREP(GUC_REGSET_STEERING_GROUP, (group)) | \
+ FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, (instance)) | \
+ GUC_REGSET_NEEDS_STEERING \
+)
+
+static long __must_check guc_mmio_reg_add(struct intel_gt *gt,
+ struct temp_regset *regset,
+ i915_reg_t reg, u32 flags)
{
u32 count = regset->storage_used - (regset->registers - regset->storage);
- struct guc_mmio_reg reg = {
+ u32 offset = i915_mmio_reg_offset(reg);
+ struct guc_mmio_reg entry = {
.offset = offset,
.flags = flags,
};
struct guc_mmio_reg *slot;
+ u8 group, inst;
/*
* The mmio list is built using separate lists within the driver.
@@ -292,11 +301,22 @@ static long __must_check guc_mmio_reg_add(struct temp_regset *regset,
* register more than once. Do not consider this an error; silently
* move on if the register is already in the list.
*/
- if (bsearch(&reg, regset->registers, count,
- sizeof(reg), guc_mmio_reg_cmp))
+ if (bsearch(&entry, regset->registers, count,
+ sizeof(entry), guc_mmio_reg_cmp))
return 0;
- slot = __mmio_reg_add(regset, &reg);
+ /*
+ * The GuC doesn't have a default steering, so we need to explicitly
+ * steer all registers that need steering. However, we do not keep track
+ * of all the steering ranges, only of those that have a chance of using
+ * a non-default steering from the i915 pov. Instead of adding such
+ * tracking, it is easier to just program the default steering for all
+ * regs that don't need a non-default one.
+ */
+ intel_gt_get_valid_steering_for_reg(gt, reg, &group, &inst);
+ entry.flags |= GUC_REGSET_STEERING(group, inst);
+
+ slot = __mmio_reg_add(regset, &entry);
if (IS_ERR(slot))
return PTR_ERR(slot);
@@ -311,14 +331,16 @@ static long __must_check guc_mmio_reg_add(struct temp_regset *regset,
return 0;
}
-#define GUC_MMIO_REG_ADD(regset, reg, masked) \
- guc_mmio_reg_add(regset, \
- i915_mmio_reg_offset((reg)), \
+#define GUC_MMIO_REG_ADD(gt, regset, reg, masked) \
+ guc_mmio_reg_add(gt, \
+ regset, \
+ (reg), \
(masked) ? GUC_REGSET_MASKED : 0)
static int guc_mmio_regset_init(struct temp_regset *regset,
struct intel_engine_cs *engine)
{
+ struct intel_gt *gt = engine->gt;
const u32 base = engine->mmio_base;
struct i915_wa_list *wal = &engine->wa_list;
struct i915_wa *wa;
@@ -331,26 +353,26 @@ static int guc_mmio_regset_init(struct temp_regset *regset,
*/
regset->registers = regset->storage + regset->storage_used;
- ret |= GUC_MMIO_REG_ADD(regset, RING_MODE_GEN7(base), true);
- ret |= GUC_MMIO_REG_ADD(regset, RING_HWS_PGA(base), false);
- ret |= GUC_MMIO_REG_ADD(regset, RING_IMR(base), false);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, RING_MODE_GEN7(base), true);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, RING_HWS_PGA(base), false);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, RING_IMR(base), false);
- if (engine->class == RENDER_CLASS &&
+ if ((engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) &&
CCS_MASK(engine->gt))
- ret |= GUC_MMIO_REG_ADD(regset, GEN12_RCU_MODE, true);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, GEN12_RCU_MODE, true);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- ret |= GUC_MMIO_REG_ADD(regset, wa->reg, wa->masked_reg);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, wa->reg, wa->masked_reg);
/* Be extra paranoid and include all whitelist registers. */
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
- ret |= GUC_MMIO_REG_ADD(regset,
+ ret |= GUC_MMIO_REG_ADD(gt, regset,
RING_FORCE_TO_NONPRIV(base, i),
false);
/* add in local MOCS registers */
for (i = 0; i < GEN9_LNCFCMOCS_REG_COUNT; i++)
- ret |= GUC_MMIO_REG_ADD(regset, GEN9_LNCFCMOCS(i), false);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);
return ret ? -1 : 0;
}
@@ -433,7 +455,7 @@ static void guc_mmio_reg_state_init(struct intel_guc *guc)
static void fill_engine_enable_masks(struct intel_gt *gt,
struct iosys_map *info_map)
{
- info_map_write(info_map, engine_enabled_masks[GUC_RENDER_CLASS], 1);
+ info_map_write(info_map, engine_enabled_masks[GUC_RENDER_CLASS], RCS_MASK(gt));
info_map_write(info_map, engine_enabled_masks[GUC_COMPUTE_CLASS], CCS_MASK(gt));
info_map_write(info_map, engine_enabled_masks[GUC_BLITTER_CLASS], 1);
info_map_write(info_map, engine_enabled_masks[GUC_VIDEO_CLASS], VDBOX_MASK(gt));
@@ -589,24 +611,119 @@ static void guc_init_golden_context(struct intel_guc *guc)
GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
}
-static void guc_capture_list_init(struct intel_guc *guc)
+static int
+guc_capture_prep_lists(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0;
+ struct guc_gt_system_info local_info;
+ struct iosys_map info_map;
+ bool ads_is_mapped;
+ size_t size = 0;
+ void *ptr;
int i, j;
- u32 addr_ggtt, offset;
- offset = guc_ads_capture_offset(guc);
- addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
+ ads_is_mapped = !iosys_map_is_null(&guc->ads_map);
+ if (ads_is_mapped) {
+ capture_offset = guc_ads_capture_offset(guc);
+ ads_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma);
+ info_map = IOSYS_MAP_INIT_OFFSET(&guc->ads_map,
+ offsetof(struct __guc_ads_blob, system_info));
+ } else {
+ memset(&local_info, 0, sizeof(local_info));
+ iosys_map_set_vaddr(&info_map, &local_info);
+ fill_engine_enable_masks(gt, &info_map);
+ }
- /* FIXME: Populate a proper capture list */
+ /* first, set aside the first page for a capture_list with zero descriptors */
+ total_size = PAGE_SIZE;
+ if (ads_is_mapped) {
+ if (!intel_guc_capture_getnullheader(guc, &ptr, &size))
+ iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
+ null_ggtt = ads_ggtt + capture_offset;
+ capture_offset += PAGE_SIZE;
+ }
for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {
- ads_blob_write(guc, ads.capture_instance[i][j], addr_ggtt);
- ads_blob_write(guc, ads.capture_class[i][j], addr_ggtt);
- }
- ads_blob_write(guc, ads.capture_global[i], addr_ggtt);
+ /* null list if we don't have said engine or list */
+ if (!info_map_read(&info_map, engine_enabled_masks[j])) {
+ if (ads_is_mapped) {
+ ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
+ ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
+ }
+ continue;
+ }
+ if (intel_guc_capture_getlistsize(guc, i,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
+ j, &size)) {
+ if (ads_is_mapped)
+ ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
+ goto engine_instance_list;
+ }
+ total_size += size;
+ if (ads_is_mapped) {
+ if (total_size > guc->ads_capture_size ||
+ intel_guc_capture_getlist(guc, i,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
+ j, &ptr)) {
+ ads_blob_write(guc, ads.capture_class[i][j], null_ggtt);
+ continue;
+ }
+ ads_blob_write(guc, ads.capture_class[i][j], ads_ggtt +
+ capture_offset);
+ iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
+ capture_offset += size;
+ }
+engine_instance_list:
+ if (intel_guc_capture_getlistsize(guc, i,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
+ j, &size)) {
+ if (ads_is_mapped)
+ ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
+ continue;
+ }
+ total_size += size;
+ if (ads_is_mapped) {
+ if (total_size > guc->ads_capture_size ||
+ intel_guc_capture_getlist(guc, i,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
+ j, &ptr)) {
+ ads_blob_write(guc, ads.capture_instance[i][j], null_ggtt);
+ continue;
+ }
+ ads_blob_write(guc, ads.capture_instance[i][j], ads_ggtt +
+ capture_offset);
+ iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
+ capture_offset += size;
+ }
+ }
+ if (intel_guc_capture_getlistsize(guc, i, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &size)) {
+ if (ads_is_mapped)
+ ads_blob_write(guc, ads.capture_global[i], null_ggtt);
+ continue;
+ }
+ total_size += size;
+ if (ads_is_mapped) {
+ if (total_size > guc->ads_capture_size ||
+ intel_guc_capture_getlist(guc, i, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0,
+ &ptr)) {
+ ads_blob_write(guc, ads.capture_global[i], null_ggtt);
+ continue;
+ }
+ ads_blob_write(guc, ads.capture_global[i], ads_ggtt + capture_offset);
+ iosys_map_memcpy_to(&guc->ads_map, capture_offset, ptr, size);
+ capture_offset += size;
+ }
}
+
+ if (guc->ads_capture_size && guc->ads_capture_size != PAGE_ALIGN(total_size))
+ drm_warn(&i915->drm, "GuC->ADS->Capture alloc size changed from %d to %d\n",
+ guc->ads_capture_size, PAGE_ALIGN(total_size));
+
+ return PAGE_ALIGN(total_size);
}
static void __guc_ads_init(struct intel_guc *guc)
@@ -644,8 +761,8 @@ static void __guc_ads_init(struct intel_guc *guc)
base = intel_guc_ggtt_offset(guc, guc->ads_vma);
- /* Capture list for hang debug */
- guc_capture_list_init(guc);
+ /* Lists for error capture debug */
+ guc_capture_prep_lists(guc);
/* ADS */
ads_blob_write(guc, ads.scheduler_policies, base +
@@ -693,6 +810,12 @@ int intel_guc_ads_create(struct intel_guc *guc)
return ret;
guc->ads_golden_ctxt_size = ret;
+ /* Likewise the capture lists: */
+ ret = guc_capture_prep_lists(guc);
+ if (ret < 0)
+ return ret;
+ guc->ads_capture_size = ret;
+
/* Now the total size can be determined: */
size = guc_ads_blob_size(guc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
new file mode 100644
index 000000000000..c4e25966d3e9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -0,0 +1,1657 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021-2022 Intel Corporation
+ */
+
+#include <linux/types.h>
+
+#include <drm/drm_print.h>
+
+#include "gt/intel_engine_regs.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_lrc.h"
+#include "guc_capture_fwif.h"
+#include "intel_guc_capture.h"
+#include "intel_guc_fwif.h"
+#include "i915_drv.h"
+#include "i915_gpu_error.h"
+#include "i915_irq.h"
+#include "i915_memcpy.h"
+#include "i915_reg.h"
+
+/*
+ * Define all device tables of GuC error capture register lists
+ * NOTE: For engine-registers, GuC only needs the register offsets
+ * from the engine-mmio-base
+ */
+#define COMMON_BASE_GLOBAL \
+ { FORCEWAKE_MT, 0, 0, "FORCEWAKE" }
+
+#define COMMON_GEN9BASE_GLOBAL \
+ { GEN8_FAULT_TLB_DATA0, 0, 0, "GEN8_FAULT_TLB_DATA0" }, \
+ { GEN8_FAULT_TLB_DATA1, 0, 0, "GEN8_FAULT_TLB_DATA1" }, \
+ { ERROR_GEN6, 0, 0, "ERROR_GEN6" }, \
+ { DONE_REG, 0, 0, "DONE_REG" }, \
+ { HSW_GTT_CACHE_EN, 0, 0, "HSW_GTT_CACHE_EN" }
+
+#define COMMON_GEN12BASE_GLOBAL \
+ { GEN12_FAULT_TLB_DATA0, 0, 0, "GEN12_FAULT_TLB_DATA0" }, \
+ { GEN12_FAULT_TLB_DATA1, 0, 0, "GEN12_FAULT_TLB_DATA1" }, \
+ { GEN12_AUX_ERR_DBG, 0, 0, "AUX_ERR_DBG" }, \
+ { GEN12_GAM_DONE, 0, 0, "GAM_DONE" }, \
+ { GEN12_RING_FAULT_REG, 0, 0, "FAULT_REG" }
+
+#define COMMON_BASE_ENGINE_INSTANCE \
+ { RING_PSMI_CTL(0), 0, 0, "RC PSMI" }, \
+ { RING_ESR(0), 0, 0, "ESR" }, \
+ { RING_DMA_FADD(0), 0, 0, "RING_DMA_FADD_LDW" }, \
+ { RING_DMA_FADD_UDW(0), 0, 0, "RING_DMA_FADD_UDW" }, \
+ { RING_IPEIR(0), 0, 0, "IPEIR" }, \
+ { RING_IPEHR(0), 0, 0, "IPEHR" }, \
+ { RING_INSTPS(0), 0, 0, "INSTPS" }, \
+ { RING_BBADDR(0), 0, 0, "RING_BBADDR_LOW32" }, \
+ { RING_BBADDR_UDW(0), 0, 0, "RING_BBADDR_UP32" }, \
+ { RING_BBSTATE(0), 0, 0, "BB_STATE" }, \
+ { CCID(0), 0, 0, "CCID" }, \
+ { RING_ACTHD(0), 0, 0, "ACTHD_LDW" }, \
+ { RING_ACTHD_UDW(0), 0, 0, "ACTHD_UDW" }, \
+ { RING_INSTPM(0), 0, 0, "INSTPM" }, \
+ { RING_INSTDONE(0), 0, 0, "INSTDONE" }, \
+ { RING_NOPID(0), 0, 0, "RING_NOPID" }, \
+ { RING_START(0), 0, 0, "START" }, \
+ { RING_HEAD(0), 0, 0, "HEAD" }, \
+ { RING_TAIL(0), 0, 0, "TAIL" }, \
+ { RING_CTL(0), 0, 0, "CTL" }, \
+ { RING_MI_MODE(0), 0, 0, "MODE" }, \
+ { RING_CONTEXT_CONTROL(0), 0, 0, "RING_CONTEXT_CONTROL" }, \
+ { RING_HWS_PGA(0), 0, 0, "HWS" }, \
+ { RING_MODE_GEN7(0), 0, 0, "GFX_MODE" }, \
+ { GEN8_RING_PDP_LDW(0, 0), 0, 0, "PDP0_LDW" }, \
+ { GEN8_RING_PDP_UDW(0, 0), 0, 0, "PDP0_UDW" }, \
+ { GEN8_RING_PDP_LDW(0, 1), 0, 0, "PDP1_LDW" }, \
+ { GEN8_RING_PDP_UDW(0, 1), 0, 0, "PDP1_UDW" }, \
+ { GEN8_RING_PDP_LDW(0, 2), 0, 0, "PDP2_LDW" }, \
+ { GEN8_RING_PDP_UDW(0, 2), 0, 0, "PDP2_UDW" }, \
+ { GEN8_RING_PDP_LDW(0, 3), 0, 0, "PDP3_LDW" }, \
+ { GEN8_RING_PDP_UDW(0, 3), 0, 0, "PDP3_UDW" }
+
+#define COMMON_BASE_HAS_EU \
+ { EIR, 0, 0, "EIR" }
+
+#define COMMON_BASE_RENDER \
+ { GEN7_SC_INSTDONE, 0, 0, "GEN7_SC_INSTDONE" }
+
+#define COMMON_GEN12BASE_RENDER \
+ { GEN12_SC_INSTDONE_EXTRA, 0, 0, "GEN12_SC_INSTDONE_EXTRA" }, \
+ { GEN12_SC_INSTDONE_EXTRA2, 0, 0, "GEN12_SC_INSTDONE_EXTRA2" }
+
+#define COMMON_GEN12BASE_VEC \
+ { GEN12_SFC_DONE(0), 0, 0, "SFC_DONE[0]" }, \
+ { GEN12_SFC_DONE(1), 0, 0, "SFC_DONE[1]" }, \
+ { GEN12_SFC_DONE(2), 0, 0, "SFC_DONE[2]" }, \
+ { GEN12_SFC_DONE(3), 0, 0, "SFC_DONE[3]" }
+
+/* XE_LPD - Global */
+static const struct __guc_mmio_reg_descr xe_lpd_global_regs[] = {
+ COMMON_BASE_GLOBAL,
+ COMMON_GEN9BASE_GLOBAL,
+ COMMON_GEN12BASE_GLOBAL,
+};
+
+/* XE_LPD - Render / Compute Per-Class */
+static const struct __guc_mmio_reg_descr xe_lpd_rc_class_regs[] = {
+ COMMON_BASE_HAS_EU,
+ COMMON_BASE_RENDER,
+ COMMON_GEN12BASE_RENDER,
+};
+
+/* GEN9/XE_LPD - Render / Compute Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lpd_rc_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* GEN9/XE_LPD - Media Decode/Encode Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lpd_vd_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* XE_LPD - Video Enhancement Per-Class */
+static const struct __guc_mmio_reg_descr xe_lpd_vec_class_regs[] = {
+ COMMON_GEN12BASE_VEC,
+};
+
+/* GEN9/XE_LPD - Video Enhancement Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lpd_vec_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* GEN9/XE_LPD - Blitter Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lpd_blt_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* GEN9 - Global */
+static const struct __guc_mmio_reg_descr default_global_regs[] = {
+ COMMON_BASE_GLOBAL,
+ COMMON_GEN9BASE_GLOBAL,
+};
+
+static const struct __guc_mmio_reg_descr default_rc_class_regs[] = {
+ COMMON_BASE_HAS_EU,
+ COMMON_BASE_RENDER,
+};
+
+/*
+ * Empty lists:
+ * GEN9/XE_LPD - Blitter Per-Class
+ * GEN9/XE_LPD - Media Decode/Encode Per-Class
+ * GEN9 - VEC Class
+ */
+static const struct __guc_mmio_reg_descr empty_regs_list[] = {
+};
+
+#define TO_GCAP_DEF_OWNER(x) (GUC_CAPTURE_LIST_INDEX_##x)
+#define TO_GCAP_DEF_TYPE(x) (GUC_CAPTURE_LIST_TYPE_##x)
+#define MAKE_REGLIST(regslist, regsowner, regstype, class) \
+ { \
+ regslist, \
+ ARRAY_SIZE(regslist), \
+ TO_GCAP_DEF_OWNER(regsowner), \
+ TO_GCAP_DEF_TYPE(regstype), \
+ class, \
+ NULL, \
+ }
+
+/* List of lists */
+static struct __guc_mmio_reg_descr_group default_lists[] = {
+ MAKE_REGLIST(default_global_regs, PF, GLOBAL, 0),
+ MAKE_REGLIST(default_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS),
+ MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEO_CLASS),
+ MAKE_REGLIST(xe_lpd_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEO_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEOENHANCE_CLASS),
+ MAKE_REGLIST(xe_lpd_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEOENHANCE_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_BLITTER_CLASS),
+ MAKE_REGLIST(xe_lpd_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_BLITTER_CLASS),
+ {}
+};
+
+static const struct __guc_mmio_reg_descr_group xe_lpd_lists[] = {
+ MAKE_REGLIST(xe_lpd_global_regs, PF, GLOBAL, 0),
+ MAKE_REGLIST(xe_lpd_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS),
+ MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEO_CLASS),
+ MAKE_REGLIST(xe_lpd_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEO_CLASS),
+ MAKE_REGLIST(xe_lpd_vec_class_regs, PF, ENGINE_CLASS, GUC_VIDEOENHANCE_CLASS),
+ MAKE_REGLIST(xe_lpd_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEOENHANCE_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_BLITTER_CLASS),
+ MAKE_REGLIST(xe_lpd_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_BLITTER_CLASS),
+ {}
+};
+
+static const struct __guc_mmio_reg_descr_group *
+guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group *reglists,
+ u32 owner, u32 type, u32 id)
+{
+ int i;
+
+ if (!reglists)
+ return NULL;
+
+ for (i = 0; reglists[i].list; ++i) {
+ if (reglists[i].owner == owner && reglists[i].type == type &&
+ (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL))
+ return &reglists[i];
+ }
+
+ return NULL;
+}
+
+static struct __guc_mmio_reg_descr_group *
+guc_capture_get_one_ext_list(struct __guc_mmio_reg_descr_group *reglists,
+ u32 owner, u32 type, u32 id)
+{
+ int i;
+
+ if (!reglists)
+ return NULL;
+
+ for (i = 0; reglists[i].extlist; ++i) {
+ if (reglists[i].owner == owner && reglists[i].type == type &&
+ (reglists[i].engine == id || reglists[i].type == GUC_CAPTURE_LIST_TYPE_GLOBAL))
+ return &reglists[i];
+ }
+
+ return NULL;
+}
+
+static void guc_capture_free_extlists(struct __guc_mmio_reg_descr_group *reglists)
+{
+ int i = 0;
+
+ if (!reglists)
+ return;
+
+ while (reglists[i].extlist)
+ kfree(reglists[i++].extlist);
+}
+
+struct __ext_steer_reg {
+ const char *name;
+ i915_reg_t reg;
+};
+
+static const struct __ext_steer_reg xe_extregs[] = {
+ {"GEN7_SAMPLER_INSTDONE", GEN7_SAMPLER_INSTDONE},
+ {"GEN7_ROW_INSTDONE", GEN7_ROW_INSTDONE}
+};
+
+static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
+ const struct __ext_steer_reg *extlist,
+ int slice_id, int subslice_id)
+{
+ ext->reg = extlist->reg;
+ ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
+ ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
+ ext->regname = extlist->name;
+}
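
A minimal sketch (not part of the patch) with hypothetical slice/subslice values, showing how the flag packing above lands in the flags dword, given the GUC_REGSET_STEERING_* GENMASK definitions added later in this patch:

/*
 * Worked example of the steering-flag packing (hypothetical values),
 * with GUC_REGSET_STEERING_GROUP = GENMASK(15, 12) and
 * GUC_REGSET_STEERING_INSTANCE = GENMASK(23, 20).
 */
u32 flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, 2) |    /* slice 2    -> bits 15:12 */
	    FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, 3);  /* subslice 3 -> bits 23:20 */
/* flags == 0x00302000 */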
+
+static int
+__alloc_ext_regs(struct __guc_mmio_reg_descr_group *newlist,
+ const struct __guc_mmio_reg_descr_group *rootlist, int num_regs)
+{
+ struct __guc_mmio_reg_descr *list;
+
+ list = kcalloc(num_regs, sizeof(struct __guc_mmio_reg_descr), GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+
+ newlist->extlist = list;
+ newlist->num_regs = num_regs;
+ newlist->owner = rootlist->owner;
+ newlist->engine = rootlist->engine;
+ newlist->type = rootlist->type;
+
+ return 0;
+}
+
+static void
+guc_capture_alloc_steered_lists_xe_lpd(struct intel_guc *guc,
+ const struct __guc_mmio_reg_descr_group *lists)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int slice, subslice, i, num_steer_regs, num_tot_regs = 0;
+ const struct __guc_mmio_reg_descr_group *list;
+ struct __guc_mmio_reg_descr_group *extlists;
+ struct __guc_mmio_reg_descr *extarray;
+ struct sseu_dev_info *sseu;
+
+ /* In XE_LPD we only have steered registers for the render-class */
+ list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, GUC_RENDER_CLASS);
+ /* skip if extlists was previously allocated */
+ if (!list || guc->capture->extlists)
+ return;
+
+ num_steer_regs = ARRAY_SIZE(xe_extregs);
+
+ sseu = &gt->info.sseu;
+ for_each_instdone_slice_subslice(i915, sseu, slice, subslice)
+ num_tot_regs += num_steer_regs;
+
+ if (!num_tot_regs)
+ return;
+
+ /* allocate an extra for an end marker */
+ extlists = kcalloc(2, sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL);
+ if (!extlists)
+ return;
+
+ if (__alloc_ext_regs(&extlists[0], list, num_tot_regs)) {
+ kfree(extlists);
+ return;
+ }
+
+ extarray = extlists[0].extlist;
+ for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
+ for (i = 0; i < num_steer_regs; ++i) {
+ __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
+ ++extarray;
+ }
+ }
+
+ guc->capture->extlists = extlists;
+}
+
+static const struct __ext_steer_reg xehpg_extregs[] = {
+ {"XEHPG_INSTDONE_GEOM_SVG", XEHPG_INSTDONE_GEOM_SVG}
+};
+
+static bool __has_xehpg_extregs(u32 ipver)
+{
+ return (ipver >= IP_VER(12, 55));
+}
+
+static void
+guc_capture_alloc_steered_lists_xe_hpg(struct intel_guc *guc,
+ const struct __guc_mmio_reg_descr_group *lists,
+ u32 ipver)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct sseu_dev_info *sseu;
+ int slice, subslice, i, iter, num_steer_regs, num_tot_regs = 0;
+ const struct __guc_mmio_reg_descr_group *list;
+ struct __guc_mmio_reg_descr_group *extlists;
+ struct __guc_mmio_reg_descr *extarray;
+
+ /* In XE_LP / HPG we only have render-class steering registers during error-capture */
+ list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS, GUC_RENDER_CLASS);
+ /* skip if extlists was previously allocated */
+ if (!list || guc->capture->extlists)
+ return;
+
+ num_steer_regs = ARRAY_SIZE(xe_extregs);
+ if (__has_xehpg_extregs(ipver))
+ num_steer_regs += ARRAY_SIZE(xehpg_extregs);
+
+ sseu = &gt->info.sseu;
+ for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice) {
+ num_tot_regs += num_steer_regs;
+ }
+
+ if (!num_tot_regs)
+ return;
+
+ /* allocate an extra for an end marker */
+ extlists = kcalloc(2, sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL);
+ if (!extlists)
+ return;
+
+ if (__alloc_ext_regs(&extlists[0], list, num_tot_regs)) {
+ kfree(extlists);
+ return;
+ }
+
+ extarray = extlists[0].extlist;
+ for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice) {
+ for (i = 0; i < ARRAY_SIZE(xe_extregs); ++i) {
+ __fill_ext_reg(extarray, &xe_extregs[i], slice, subslice);
+ ++extarray;
+ }
+ if (__has_xehpg_extregs(ipver)) {
+ for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) {
+ __fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice);
+ ++extarray;
+ }
+ }
+ }
+
+ drm_dbg(&i915->drm, "GuC-capture found %d-ext-regs.\n", num_tot_regs);
+ guc->capture->extlists = extlists;
+}
+
+static const struct __guc_mmio_reg_descr_group *
+guc_capture_get_device_reglist(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ if (GRAPHICS_VER(i915) > 11) {
+ /*
+ * For certain engine classes, there are slice and subslice
+ * level registers requiring steering. We allocate and populate
+ * these at init time based on hw config and add them as an extension
+ * list at the end of the pre-populated render list.
+ */
+ if (IS_DG2(i915))
+ guc_capture_alloc_steered_lists_xe_hpg(guc, xe_lpd_lists, IP_VER(12, 55));
+ else if (IS_XEHPSDV(i915))
+ guc_capture_alloc_steered_lists_xe_hpg(guc, xe_lpd_lists, IP_VER(12, 50));
+ else
+ guc_capture_alloc_steered_lists_xe_lpd(guc, xe_lpd_lists);
+
+ return xe_lpd_lists;
+ }
+
+ /* if GuC submission is enabled on a non-POR platform, just use a common baseline */
+ return default_lists;
+}
+
+static const char *
+__stringify_owner(u32 owner)
+{
+ switch (owner) {
+ case GUC_CAPTURE_LIST_INDEX_PF:
+ return "PF";
+ case GUC_CAPTURE_LIST_INDEX_VF:
+ return "VF";
+ default:
+ return "unknown";
+ }
+
+ return "";
+}
+
+static const char *
+__stringify_type(u32 type)
+{
+ switch (type) {
+ case GUC_CAPTURE_LIST_TYPE_GLOBAL:
+ return "Global";
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
+ return "Class";
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
+ return "Instance";
+ default:
+ return "unknown";
+ }
+
+ return "";
+}
+
+static const char *
+__stringify_engclass(u32 class)
+{
+ switch (class) {
+ case GUC_RENDER_CLASS:
+ return "Render";
+ case GUC_VIDEO_CLASS:
+ return "Video";
+ case GUC_VIDEOENHANCE_CLASS:
+ return "VideoEnhance";
+ case GUC_BLITTER_CLASS:
+ return "Blitter";
+ case GUC_COMPUTE_CLASS:
+ return "Compute";
+ default:
+ return "unknown";
+ }
+
+ return "";
+}
+
+static void
+guc_capture_warn_with_list_info(struct drm_i915_private *i915, char *msg,
+ u32 owner, u32 type, u32 classid)
+{
+ if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL)
+ drm_dbg(&i915->drm, "GuC-capture: %s for %s %s-Registers.\n", msg,
+ __stringify_owner(owner), __stringify_type(type));
+ else
+ drm_dbg(&i915->drm, "GuC-capture: %s for %s %s-Registers on %s-Engine\n", msg,
+ __stringify_owner(owner), __stringify_type(type),
+ __stringify_engclass(classid));
+}
+
+static int
+guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ struct guc_mmio_reg *ptr, u16 num_entries)
+{
+ u32 i = 0, j = 0;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
+ struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
+ const struct __guc_mmio_reg_descr_group *match;
+ struct __guc_mmio_reg_descr_group *matchext;
+
+ if (!reglists)
+ return -ENODEV;
+
+ match = guc_capture_get_one_list(reglists, owner, type, classid);
+ if (!match) {
+ guc_capture_warn_with_list_info(i915, "Missing register list init", owner, type,
+ classid);
+ return -ENODATA;
+ }
+
+ for (i = 0; i < num_entries && i < match->num_regs; ++i) {
+ ptr[i].offset = match->list[i].reg.reg;
+ ptr[i].value = 0xDEADF00D;
+ ptr[i].flags = match->list[i].flags;
+ ptr[i].mask = match->list[i].mask;
+ }
+
+ matchext = guc_capture_get_one_ext_list(extlists, owner, type, classid);
+ if (matchext) {
+ for (i = match->num_regs, j = 0; i < num_entries &&
+ i < (match->num_regs + matchext->num_regs) &&
+ j < matchext->num_regs; ++i, ++j) {
+ ptr[i].offset = matchext->extlist[j].reg.reg;
+ ptr[i].value = 0xDEADF00D;
+ ptr[i].flags = matchext->extlist[j].flags;
+ ptr[i].mask = matchext->extlist[j].mask;
+ }
+ }
+ if (i < num_entries)
+ drm_dbg(&i915->drm, "GuC-capture: Init reglist short %d out of %d.\n",
+ (int)i, (int)num_entries);
+
+ return 0;
+}
+
+static int
+guc_cap_list_num_regs(struct intel_guc_state_capture *gc, u32 owner, u32 type, u32 classid)
+{
+ const struct __guc_mmio_reg_descr_group *match;
+ struct __guc_mmio_reg_descr_group *matchext;
+ int num_regs;
+
+ match = guc_capture_get_one_list(gc->reglists, owner, type, classid);
+ if (!match)
+ return 0;
+
+ num_regs = match->num_regs;
+
+ matchext = guc_capture_get_one_ext_list(gc->extlists, owner, type, classid);
+ if (matchext)
+ num_regs += matchext->num_regs;
+
+ return num_regs;
+}
+
+int
+intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ size_t *size)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct intel_guc_state_capture *gc = guc->capture;
+ struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
+ int num_regs;
+
+ if (!gc->reglists)
+ return -ENODEV;
+
+ if (cache->is_valid) {
+ *size = cache->size;
+ return cache->status;
+ }
+
+ num_regs = guc_cap_list_num_regs(gc, owner, type, classid);
+ if (!num_regs) {
+ guc_capture_warn_with_list_info(i915, "Missing register list size",
+ owner, type, classid);
+ return -ENODATA;
+ }
+
+ *size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
+ (num_regs * sizeof(struct guc_mmio_reg)));
+
+ return 0;
+}
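
As a worked example of the size math above, assuming a 4-byte guc_debug_capture_list header and 16-byte guc_mmio_reg entries (a sketch, not part of the patch), a 100-register list lands on a single 4K page:

/* Worked example (assumed struct sizes) of the list-size computation above. */
size_t example = PAGE_ALIGN(sizeof(struct guc_debug_capture_list) +
			    100 * sizeof(struct guc_mmio_reg));
/* e.g. PAGE_ALIGN(4 + 100 * 16) = 4096 bytes on a 4K-page system */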
+
+static void guc_capture_create_prealloc_nodes(struct intel_guc *guc);
+
+int
+intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ void **outptr)
+{
+ struct intel_guc_state_capture *gc = guc->capture;
+ struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct guc_debug_capture_list *listnode;
+ int ret, num_regs;
+ u8 *caplist, *tmp;
+ size_t size = 0;
+
+ if (!gc->reglists)
+ return -ENODEV;
+
+ if (cache->is_valid) {
+ *outptr = cache->ptr;
+ return cache->status;
+ }
+
+ /*
+ * ADS population of input registers is a good
+ * time to pre-allocate cachelist output nodes
+ */
+ guc_capture_create_prealloc_nodes(guc);
+
+ ret = intel_guc_capture_getlistsize(guc, owner, type, classid, &size);
+ if (ret) {
+ cache->is_valid = true;
+ cache->ptr = NULL;
+ cache->size = 0;
+ cache->status = ret;
+ return ret;
+ }
+
+ caplist = kzalloc(size, GFP_KERNEL);
+ if (!caplist) {
+ drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached caplist");
+ return -ENOMEM;
+ }
+
+ /* populate capture list header */
+ tmp = caplist;
+ num_regs = guc_cap_list_num_regs(guc->capture, owner, type, classid);
+ listnode = (struct guc_debug_capture_list *)tmp;
+ listnode->header.info = FIELD_PREP(GUC_CAPTURELISTHDR_NUMDESCR, (u32)num_regs);
+
+ /* populate list of register descriptor */
+ tmp += sizeof(struct guc_debug_capture_list);
+ guc_capture_list_init(guc, owner, type, classid, (struct guc_mmio_reg *)tmp, num_regs);
+
+ /* cache this list */
+ cache->is_valid = true;
+ cache->ptr = caplist;
+ cache->size = size;
+ cache->status = 0;
+
+ *outptr = caplist;
+
+ return 0;
+}
+
+int
+intel_guc_capture_getnullheader(struct intel_guc *guc,
+ void **outptr, size_t *size)
+{
+ struct intel_guc_state_capture *gc = guc->capture;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int tmp = sizeof(u32) * 4;
+ void *null_header;
+
+ if (gc->ads_null_cache) {
+ *outptr = gc->ads_null_cache;
+ *size = tmp;
+ return 0;
+ }
+
+ null_header = kzalloc(tmp, GFP_KERNEL);
+ if (!null_header) {
+ drm_dbg(&i915->drm, "GuC-capture: failed to alloc cached nulllist");
+ return -ENOMEM;
+ }
+
+ gc->ads_null_cache = null_header;
+ *outptr = null_header;
+ *size = tmp;
+
+ return 0;
+}
+
+#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3
+
+int
+intel_guc_capture_output_min_size_est(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int worst_min_size = 0, num_regs = 0;
+ size_t tmp = 0;
+
+ if (!guc->capture)
+ return -ENODEV;
+
+ /*
+ * If every single engine-instance suffered a failure in quick succession but
+ * the failures were all unrelated, then a burst of multiple error-capture
+ * events would dump registers for each engine instance, one at a time. In
+ * this case, GuC would even dump the global-registers repeatedly.
+ *
+ * For each engine instance, there would be 1 x guc_state_capture_group_t output
+ * followed by 3 x guc_state_capture_t lists. The latter is how the register
+ * dumps are split across different register types (where the '3' covers global
+ * vs class vs instance). Finally, multiply the whole thing by 3x (so we are
+ * not limited to just 1 round of data in a worst-case full register dump log)
+ *
+ * NOTE: intel_guc_log, which allocates the log buffer, would round this size
+ * up to a power of two.
+ */
+
+ for_each_engine(engine, gt, id) {
+ worst_min_size += sizeof(struct guc_state_capture_group_header_t) +
+ (3 * sizeof(struct guc_state_capture_header_t));
+
+ if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp))
+ num_regs += tmp;
+
+ if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
+ engine->class, &tmp)) {
+ num_regs += tmp;
+ }
+ if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
+ engine->class, &tmp)) {
+ num_regs += tmp;
+ }
+ }
+
+ worst_min_size += (num_regs * sizeof(struct guc_mmio_reg));
+
+ return (worst_min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER);
+}
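
To make the estimate concrete, here is a minimal sketch of the same arithmetic (not part of the patch); the struct sizes are passed in as parameters since the exact ABI sizes are firmware-defined:

/* Hypothetical sketch mirroring the worst-case estimate above. */
#include <stddef.h>

#define OVERBUFFER_MULTIPLIER 3 /* mirrors GUC_CAPTURE_OVERBUFFER_MULTIPLIER */

static size_t est_min_capture_size(int num_engines, int regs_per_engine,
				   size_t group_hdr_sz, size_t capture_hdr_sz,
				   size_t mmio_reg_sz)
{
	size_t size = 0;

	/* per engine: one group header plus 3 capture headers (global/class/instance) */
	size += (size_t)num_engines * (group_hdr_sz + 3 * capture_hdr_sz);
	/* plus all the register payloads */
	size += (size_t)num_engines * regs_per_engine * mmio_reg_sz;

	return size * OVERBUFFER_MULTIPLIER;
}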
+
+/*
+ * KMD Init time flows:
+ * --------------------
+ * --> alloc A: GuC input capture regs lists (registered to GuC via ADS).
+ * intel_guc_ads acquires the register lists by calling
+ * intel_guc_capture_list_size and intel_guc_capture_list_get 'n' times,
+ * where n = 1 for global-reg-list +
+ * num_engine_classes for class-reg-list +
+ * num_engine_classes for instance-reg-list
+ * (since all instances of the same engine-class type
+ * have an identical engine-instance register-list).
+ * ADS module also calls separately for PF vs VF.
+ *
+ * --> alloc B: GuC output capture buf (registered via guc_init_params(log_param))
+ * Size = #define CAPTURE_BUFFER_SIZE (warns if too small)
+ * Note: 'x 3' to hold multiple capture groups
+ *
+ * GUC Runtime notify capture:
+ * --------------------------
+ * --> G2H STATE_CAPTURE_NOTIFICATION
+ * L--> intel_guc_capture_process
+ * L--> Loop through B (head..tail) and, for each engine instance's
+ * err-state-captured register-list we find, alloc 'C':
+ * --> alloc C: A capture-output-node structure that includes misc capture info along
+ * with 3 register list dumps (global, engine-class and engine-instance)
+ * This node is created from a pre-allocated list of blank nodes in
+ * guc->capture->cachelist and populated with the error-capture
+ * data from GuC, and is then added into the guc->capture->outlist linked
+ * list. This list is used for matchup and printout by i915_gpu_coredump
+ * and err_print_gt (when the user invokes the error capture sysfs).
+ *
+ * GUC --> notify context reset:
+ * -----------------------------
+ * --> G2H CONTEXT RESET
+ * L--> guc_handle_context_reset --> i915_capture_error_state
+ * L--> i915_gpu_coredump(..IS_GUC_CAPTURE) --> gt_record_engines
+ * --> capture_engine(..IS_GUC_CAPTURE)
+ * L--> intel_guc_capture_get_matching_node is where we
+ * detach C from the internal linked list and add it into
+ * the intel_engine_coredump struct (if the context and
+ * engine of the event notification match a node
+ * in the linked list).
+ *
+ * User Sysfs / Debugfs
+ * --------------------
+ * --> i915_gpu_coredump_copy_to_buffer->
+ * L--> err_print_to_sgl --> err_print_gt
+ * L--> error_print_guc_captures
+ * L--> intel_guc_capture_print_node prints the
+ * register lists values of the attached node
+ * on the error-engine-dump being reported.
+ * L--> i915_reset_error_state ... -->__i915_gpu_coredump_free
+ * L--> ... cleanup_gt -->
+ * L--> intel_guc_capture_free_node returns the
+ * capture-output-node back to the internal
+ * cachelist for reuse.
+ *
+ */
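
A minimal sketch of the 'alloc A' acquisition flow described above, using the getters this patch exports; the helper name, owner choice and loop shape are illustrative assumptions, not the actual intel_guc_ads code:

/* Hypothetical sketch: one global list, then class + instance lists per engine class. */
static int ads_acquire_capture_lists(struct intel_guc *guc, int num_engine_classes)
{
	void *list;
	int c, ret;

	/* 1 x global-reg-list */
	ret = intel_guc_capture_getlist(guc, GUC_CAPTURE_LIST_INDEX_PF,
					GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &list);

	/* + one class-reg-list and one instance-reg-list per engine class */
	for (c = 0; !ret && c < num_engine_classes; c++) {
		ret = intel_guc_capture_getlist(guc, GUC_CAPTURE_LIST_INDEX_PF,
						GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
						c, &list);
		if (!ret)
			ret = intel_guc_capture_getlist(guc, GUC_CAPTURE_LIST_INDEX_PF,
							GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
							c, &list);
	}

	return ret;
}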
+
+static int guc_capture_buf_cnt(struct __guc_capture_bufstate *buf)
+{
+ if (buf->wr >= buf->rd)
+ return (buf->wr - buf->rd);
+ return (buf->size - buf->rd) + buf->wr;
+}
+
+static int guc_capture_buf_cnt_to_end(struct __guc_capture_bufstate *buf)
+{
+ if (buf->rd > buf->wr)
+ return (buf->size - buf->rd);
+ return (buf->wr - buf->rd);
+}
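
A quick worked example of the two counters above, using made-up ring values where the writer has wrapped (a sketch assuming the __guc_capture_bufstate fields used by these helpers):

/* Worked example (hypothetical values) for the ring counters above. */
static void example_ring_counts(void)
{
	struct __guc_capture_bufstate ex = {
		.size = 0x1000,
		.rd = 0xff0, /* reader near the end of the ring */
		.wr = 0x010, /* writer has wrapped to the start */
	};

	/* total bytes available: (0x1000 - 0xff0) + 0x010 = 0x20 */
	int total = guc_capture_buf_cnt(&ex);
	/* contiguous bytes before the wrap: 0x1000 - 0xff0 = 0x10 */
	int to_end = guc_capture_buf_cnt_to_end(&ex);

	(void)total;
	(void)to_end;
}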
+
+/*
+ * GuC's error-capture output is a ring buffer populated in a byte-stream fashion:
+ *
+ * The GuC Log buffer region for error-capture is managed like a ring buffer.
+ * The GuC firmware dumps error capture logs into this ring in a byte-stream flow.
+ * Additionally, both now and for the foreseeable future, all packed error-
+ * capture output structures are dword aligned.
+ *
+ * That said, if the GuC firmware is in the midst of writing a structure that is larger
+ * than one dword but the tail end of the err-capture buffer-region has less space left,
+ * we would need to extract that structure one dword at a time straddled across the end,
+ * onto the start of the ring.
+ *
+ * The function below, guc_capture_log_remove_dw, is a helper for that. All callers
+ * of this function would typically do a straight-up memcpy from the ring contents
+ * and will only call this helper if their structure-extraction straddles the end of
+ * the ring. GuC firmware does not add any padding. The reason for avoiding padding
+ * is to ease scalability for future expansion of output data types without
+ * requiring a redesign of the flow controls.
+ */
+static int
+guc_capture_log_remove_dw(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
+ u32 *dw)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int tries = 2;
+ int avail = 0;
+ u32 *src_data;
+
+ if (!guc_capture_buf_cnt(buf))
+ return 0;
+
+ while (tries--) {
+ avail = guc_capture_buf_cnt_to_end(buf);
+ if (avail >= sizeof(u32)) {
+ src_data = (u32 *)(buf->data + buf->rd);
+ *dw = *src_data;
+ buf->rd += 4;
+ return 4;
+ }
+ if (avail)
+ drm_dbg(&i915->drm, "GuC-Cap-Logs not dword aligned, skipping.\n");
+ buf->rd = 0;
+ }
+
+ return 0;
+}
+
+static bool
+guc_capture_data_extracted(struct __guc_capture_bufstate *b,
+ int size, void *dest)
+{
+ if (guc_capture_buf_cnt_to_end(b) >= size) {
+ memcpy(dest, (b->data + b->rd), size);
+ b->rd += size;
+ return true;
+ }
+ return false;
+}
+
+static int
+guc_capture_log_get_group_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
+ struct guc_state_capture_group_header_t *ghdr)
+{
+ int read = 0;
+ int fullsize = sizeof(struct guc_state_capture_group_header_t);
+
+ if (fullsize > guc_capture_buf_cnt(buf))
+ return -1;
+
+ if (guc_capture_data_extracted(buf, fullsize, (void *)ghdr))
+ return 0;
+
+ read += guc_capture_log_remove_dw(guc, buf, &ghdr->owner);
+ read += guc_capture_log_remove_dw(guc, buf, &ghdr->info);
+ if (read != fullsize)
+ return -1;
+
+ return 0;
+}
+
+static int
+guc_capture_log_get_data_hdr(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
+ struct guc_state_capture_header_t *hdr)
+{
+ int read = 0;
+ int fullsize = sizeof(struct guc_state_capture_header_t);
+
+ if (fullsize > guc_capture_buf_cnt(buf))
+ return -1;
+
+ if (guc_capture_data_extracted(buf, fullsize, (void *)hdr))
+ return 0;
+
+ read += guc_capture_log_remove_dw(guc, buf, &hdr->owner);
+ read += guc_capture_log_remove_dw(guc, buf, &hdr->info);
+ read += guc_capture_log_remove_dw(guc, buf, &hdr->lrca);
+ read += guc_capture_log_remove_dw(guc, buf, &hdr->guc_id);
+ read += guc_capture_log_remove_dw(guc, buf, &hdr->num_mmios);
+ if (read != fullsize)
+ return -1;
+
+ return 0;
+}
+
+static int
+guc_capture_log_get_register(struct intel_guc *guc, struct __guc_capture_bufstate *buf,
+ struct guc_mmio_reg *reg)
+{
+ int read = 0;
+ int fullsize = sizeof(struct guc_mmio_reg);
+
+ if (fullsize > guc_capture_buf_cnt(buf))
+ return -1;
+
+ if (guc_capture_data_extracted(buf, fullsize, (void *)reg))
+ return 0;
+
+ read += guc_capture_log_remove_dw(guc, buf, &reg->offset);
+ read += guc_capture_log_remove_dw(guc, buf, &reg->value);
+ read += guc_capture_log_remove_dw(guc, buf, &reg->flags);
+ read += guc_capture_log_remove_dw(guc, buf, &reg->mask);
+ if (read != fullsize)
+ return -1;
+
+ return 0;
+}
+
+static void
+guc_capture_delete_one_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node)
+{
+ int i;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i)
+ kfree(node->reginfo[i].regs);
+ list_del(&node->link);
+ kfree(node);
+}
+
+static void
+guc_capture_delete_prealloc_nodes(struct intel_guc *guc)
+{
+ struct __guc_capture_parsed_output *n, *ntmp;
+
+ /*
+ * NOTE: At the end of driver operation, we must assume that we
+ * have prealloc nodes in both the cachelist as well as outlist
+ * if unclaimed error capture events occurred prior to shutdown.
+ */
+ list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link)
+ guc_capture_delete_one_node(guc, n);
+
+ list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link)
+ guc_capture_delete_one_node(guc, n);
+}
+
+static void
+guc_capture_add_node_to_list(struct __guc_capture_parsed_output *node,
+ struct list_head *list)
+{
+ list_add_tail(&node->link, list);
+}
+
+static void
+guc_capture_add_node_to_outlist(struct intel_guc_state_capture *gc,
+ struct __guc_capture_parsed_output *node)
+{
+ guc_capture_add_node_to_list(node, &gc->outlist);
+}
+
+static void
+guc_capture_add_node_to_cachelist(struct intel_guc_state_capture *gc,
+ struct __guc_capture_parsed_output *node)
+{
+ guc_capture_add_node_to_list(node, &gc->cachelist);
+}
+
+static void
+guc_capture_init_node(struct intel_guc *guc, struct __guc_capture_parsed_output *node)
+{
+ struct guc_mmio_reg *tmp[GUC_CAPTURE_LIST_TYPE_MAX];
+ int i;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
+ tmp[i] = node->reginfo[i].regs;
+ memset(tmp[i], 0, sizeof(struct guc_mmio_reg) *
+ guc->capture->max_mmio_per_node);
+ }
+ memset(node, 0, sizeof(*node));
+ for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i)
+ node->reginfo[i].regs = tmp[i];
+
+ INIT_LIST_HEAD(&node->link);
+}
+
+static struct __guc_capture_parsed_output *
+guc_capture_get_prealloc_node(struct intel_guc *guc)
+{
+ struct __guc_capture_parsed_output *found = NULL;
+
+ if (!list_empty(&guc->capture->cachelist)) {
+ struct __guc_capture_parsed_output *n, *ntmp;
+
+ /* get first avail node from the cache list */
+ list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link) {
+ found = n;
+ list_del(&n->link);
+ break;
+ }
+ } else {
+ struct __guc_capture_parsed_output *n, *ntmp;
+
+ /* traverse down and steal back the oldest node already allocated */
+ list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
+ found = n;
+ }
+ if (found)
+ list_del(&found->link);
+ }
+ if (found)
+ guc_capture_init_node(guc, found);
+
+ return found;
+}
+
+static struct __guc_capture_parsed_output *
+guc_capture_alloc_one_node(struct intel_guc *guc)
+{
+ struct __guc_capture_parsed_output *new;
+ int i;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
+ new->reginfo[i].regs = kcalloc(guc->capture->max_mmio_per_node,
+ sizeof(struct guc_mmio_reg), GFP_KERNEL);
+ if (!new->reginfo[i].regs) {
+ while (i)
+ kfree(new->reginfo[--i].regs);
+ kfree(new);
+ return NULL;
+ }
+ }
+ guc_capture_init_node(guc, new);
+
+ return new;
+}
+
+static struct __guc_capture_parsed_output *
+guc_capture_clone_node(struct intel_guc *guc, struct __guc_capture_parsed_output *original,
+ u32 keep_reglist_mask)
+{
+ struct __guc_capture_parsed_output *new;
+ int i;
+
+ new = guc_capture_get_prealloc_node(guc);
+ if (!new)
+ return NULL;
+ if (!original)
+ return new;
+
+ new->is_partial = original->is_partial;
+
+ /* copy reg-lists that we want to clone */
+ for (i = 0; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
+ if (keep_reglist_mask & BIT(i)) {
+ GEM_BUG_ON(original->reginfo[i].num_regs >
+ guc->capture->max_mmio_per_node);
+
+ memcpy(new->reginfo[i].regs, original->reginfo[i].regs,
+ original->reginfo[i].num_regs * sizeof(struct guc_mmio_reg));
+
+ new->reginfo[i].num_regs = original->reginfo[i].num_regs;
+ new->reginfo[i].vfid = original->reginfo[i].vfid;
+
+ if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS) {
+ new->eng_class = original->eng_class;
+ } else if (i == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) {
+ new->eng_inst = original->eng_inst;
+ new->guc_id = original->guc_id;
+ new->lrca = original->lrca;
+ }
+ }
+ }
+
+ return new;
+}
+
+static void
+__guc_capture_create_prealloc_nodes(struct intel_guc *guc)
+{
+ struct __guc_capture_parsed_output *node = NULL;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int i;
+
+ for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) {
+ node = guc_capture_alloc_one_node(guc);
+ if (!node) {
+ drm_warn(&i915->drm, "GuC Capture pre-alloc-cache failure\n");
+ /* don't free the priors, use what we got and clean up at shutdown */
+ return;
+ }
+ guc_capture_add_node_to_cachelist(guc->capture, node);
+ }
+}
+
+static int
+guc_get_max_reglist_count(struct intel_guc *guc)
+{
+ int i, j, k, tmp, maxregcount = 0;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
+ for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) {
+ for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) {
+ if (j == GUC_CAPTURE_LIST_TYPE_GLOBAL && k > 0)
+ continue;
+
+ tmp = guc_cap_list_num_regs(guc->capture, i, j, k);
+ if (tmp > maxregcount)
+ maxregcount = tmp;
+ }
+ }
+ }
+ if (!maxregcount)
+ maxregcount = PREALLOC_NODES_DEFAULT_NUMREGS;
+
+ return maxregcount;
+}
+
+static void
+guc_capture_create_prealloc_nodes(struct intel_guc *guc)
+{
+ /* skip if we've already done the pre-alloc */
+ if (guc->capture->max_mmio_per_node)
+ return;
+
+ guc->capture->max_mmio_per_node = guc_get_max_reglist_count(guc);
+ __guc_capture_create_prealloc_nodes(guc);
+}
+
+static int
+guc_capture_extract_reglists(struct intel_guc *guc, struct __guc_capture_bufstate *buf)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct guc_state_capture_group_header_t ghdr = {0};
+ struct guc_state_capture_header_t hdr = {0};
+ struct __guc_capture_parsed_output *node = NULL;
+ struct guc_mmio_reg *regs = NULL;
+ int i, numlists, numregs, ret = 0;
+ enum guc_capture_type datatype;
+ struct guc_mmio_reg tmp;
+ bool is_partial = false;
+
+ i = guc_capture_buf_cnt(buf);
+ if (!i)
+ return -ENODATA;
+ if (i % sizeof(u32)) {
+ drm_warn(&i915->drm, "GuC Capture new entries unaligned\n");
+ ret = -EIO;
+ goto bailout;
+ }
+
+ /* first get the capture group header */
+ if (guc_capture_log_get_group_hdr(guc, buf, &ghdr)) {
+ ret = -EIO;
+ goto bailout;
+ }
+ /*
+ * we would typically expect a layout as below, where the number of captures is
+ * at least 3 and grows beyond that when multiple dependent engine instances
+ * are being reset together.
+ * ____________________________________________
+ * | Capture Group |
+ * | ________________________________________ |
+ * | | Capture Group Header: | |
+ * | | - num_captures = 5 | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture1: | |
+ * | | Hdr: GLOBAL, numregs=a | |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... rega | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture2: | |
+ * | | Hdr: CLASS=RENDER/COMPUTE, numregs=b| |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... regb | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture3: | |
+ * | | Hdr: INSTANCE=RCS, numregs=c | |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... regc | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture4: | |
+ * | | Hdr: CLASS=RENDER/COMPUTE, numregs=d| |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... regd | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture5: | |
+ * | | Hdr: INSTANCE=CCS0, numregs=e | |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... rege | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * |__________________________________________|
+ */
+ is_partial = FIELD_GET(CAP_GRP_HDR_CAPTURE_TYPE, ghdr.info);
+ numlists = FIELD_GET(CAP_GRP_HDR_NUM_CAPTURES, ghdr.info);
+
+ while (numlists--) {
+ if (guc_capture_log_get_data_hdr(guc, buf, &hdr)) {
+ ret = -EIO;
+ break;
+ }
+
+ datatype = FIELD_GET(CAP_HDR_CAPTURE_TYPE, hdr.info);
+ if (datatype > GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE) {
+ /* unknown capture type - skip over to next capture set */
+ numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
+ while (numregs--) {
+ if (guc_capture_log_get_register(guc, buf, &tmp)) {
+ ret = -EIO;
+ break;
+ }
+ }
+ continue;
+ } else if (node) {
+ /*
+ * Based on the current capture type and what we have so far,
+ * decide if we should add the current node into the internal
+ * linked list for match-up when i915_gpu_coredump calls later
+ * (and alloc a blank node for the next set of reglists)
+ * or continue with the same node or clone the current node
+ * but only retain the global or class registers (such as the
+ * case of dependent engine resets).
+ */
+ if (datatype == GUC_CAPTURE_LIST_TYPE_GLOBAL) {
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = NULL;
+ } else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS &&
+ node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS].num_regs) {
+ /* Add to list, clone node and duplicate global list */
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = guc_capture_clone_node(guc, node,
+ GCAP_PARSED_REGLIST_INDEX_GLOBAL);
+ } else if (datatype == GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE &&
+ node->reginfo[GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE].num_regs) {
+ /* Add to list, clone node and duplicate global + class lists */
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = guc_capture_clone_node(guc, node,
+ (GCAP_PARSED_REGLIST_INDEX_GLOBAL |
+ GCAP_PARSED_REGLIST_INDEX_ENGCLASS));
+ }
+ }
+
+ if (!node) {
+ node = guc_capture_get_prealloc_node(guc);
+ if (!node) {
+ ret = -ENOMEM;
+ break;
+ }
+ if (datatype != GUC_CAPTURE_LIST_TYPE_GLOBAL)
+ drm_dbg(&i915->drm, "GuC Capture missing global dump: %08x!\n",
+ datatype);
+ }
+ node->is_partial = is_partial;
+ node->reginfo[datatype].vfid = FIELD_GET(CAP_HDR_CAPTURE_VFID, hdr.owner);
+ switch (datatype) {
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
+ node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
+ node->eng_inst = FIELD_GET(CAP_HDR_ENGINE_INSTANCE, hdr.info);
+ node->lrca = hdr.lrca;
+ node->guc_id = hdr.guc_id;
+ break;
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
+ node->eng_class = FIELD_GET(CAP_HDR_ENGINE_CLASS, hdr.info);
+ break;
+ default:
+ break;
+ }
+
+ numregs = FIELD_GET(CAP_HDR_NUM_MMIOS, hdr.num_mmios);
+ if (numregs > guc->capture->max_mmio_per_node) {
+ drm_dbg(&i915->drm, "GuC Capture list extraction clipped by prealloc!\n");
+ numregs = guc->capture->max_mmio_per_node;
+ }
+ node->reginfo[datatype].num_regs = numregs;
+ regs = node->reginfo[datatype].regs;
+ i = 0;
+ while (numregs--) {
+ if (guc_capture_log_get_register(guc, buf, &regs[i++])) {
+ ret = -EIO;
+ break;
+ }
+ }
+ }
+
+bailout:
+ if (node) {
+ /* If we have data, add to linked list for match-up when i915_gpu_coredump calls */
+ for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
+ if (node->reginfo[i].regs) {
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = NULL;
+ break;
+ }
+ }
+ if (node) /* else return it back to cache list */
+ guc_capture_add_node_to_cachelist(guc->capture, node);
+ }
+ return ret;
+}
+
+static int __guc_capture_flushlog_complete(struct intel_guc *guc)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
+ GUC_CAPTURE_LOG_BUFFER
+ };
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+static void __guc_capture_process_output(struct intel_guc *guc)
+{
+ unsigned int buffer_size, read_offset, write_offset, full_count;
+ struct intel_uc *uc = container_of(guc, typeof(*uc), guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct guc_log_buffer_state log_buf_state_local;
+ struct guc_log_buffer_state *log_buf_state;
+ struct __guc_capture_bufstate buf;
+ void *src_data = NULL;
+ bool new_overflow;
+ int ret;
+
+ log_buf_state = guc->log.buf_addr +
+ (sizeof(struct guc_log_buffer_state) * GUC_CAPTURE_LOG_BUFFER);
+ src_data = guc->log.buf_addr + intel_guc_get_log_buffer_offset(GUC_CAPTURE_LOG_BUFFER);
+
+ /*
+ * Make a copy of the state structure, inside GuC log buffer
+ * (which is uncached mapped), on the stack to avoid reading
+ * from it multiple times.
+ */
+ memcpy(&log_buf_state_local, log_buf_state, sizeof(struct guc_log_buffer_state));
+ buffer_size = intel_guc_get_log_buffer_size(GUC_CAPTURE_LOG_BUFFER);
+ read_offset = log_buf_state_local.read_ptr;
+ write_offset = log_buf_state_local.sampled_write_ptr;
+ full_count = log_buf_state_local.buffer_full_cnt;
+
+ /* Bookkeeping stuff */
+ guc->log.stats[GUC_CAPTURE_LOG_BUFFER].flush += log_buf_state_local.flush_to_file;
+ new_overflow = intel_guc_check_log_buf_overflow(&guc->log, GUC_CAPTURE_LOG_BUFFER,
+ full_count);
+
+ /* Now copy the actual logs. */
+ if (unlikely(new_overflow)) {
+ /* copy the whole buffer in case of overflow */
+ read_offset = 0;
+ write_offset = buffer_size;
+ } else if (unlikely((read_offset > buffer_size) ||
+ (write_offset > buffer_size))) {
+ drm_err(&i915->drm, "invalid GuC log capture buffer state!\n");
+ /* copy whole buffer as offsets are unreliable */
+ read_offset = 0;
+ write_offset = buffer_size;
+ }
+
+ buf.size = buffer_size;
+ buf.rd = read_offset;
+ buf.wr = write_offset;
+ buf.data = src_data;
+
+ if (!uc->reset_in_progress) {
+ do {
+ ret = guc_capture_extract_reglists(guc, &buf);
+ } while (ret >= 0);
+ }
+
+ /* Update the state of log buffer err-cap state */
+ log_buf_state->read_ptr = write_offset;
+ log_buf_state->flush_to_file = 0;
+ __guc_capture_flushlog_complete(guc);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+
+static const char *
+guc_capture_reg_to_str(const struct intel_guc *guc, u32 owner, u32 type,
+ u32 class, u32 id, u32 offset, u32 *is_ext)
+{
+ const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
+ struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
+ const struct __guc_mmio_reg_descr_group *match;
+ struct __guc_mmio_reg_descr_group *matchext;
+ int j;
+
+ *is_ext = 0;
+ if (!reglists)
+ return NULL;
+
+ match = guc_capture_get_one_list(reglists, owner, type, id);
+ if (!match)
+ return NULL;
+
+ for (j = 0; j < match->num_regs; ++j) {
+ if (offset == match->list[j].reg.reg)
+ return match->list[j].regname;
+ }
+ if (extlists) {
+ matchext = guc_capture_get_one_ext_list(extlists, owner, type, id);
+ if (!matchext)
+ return NULL;
+ for (j = 0; j < matchext->num_regs; ++j) {
+ if (offset == matchext->extlist[j].reg.reg) {
+ *is_ext = 1;
+ return matchext->extlist[j].regname;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+#define __out(a, ...) \
+ do { \
+ drm_warn((&(a)->i915->drm), __VA_ARGS__); \
+ i915_error_printf((a), __VA_ARGS__); \
+ } while (0)
+#else
+#define __out(a, ...) \
+ i915_error_printf(a, __VA_ARGS__)
+#endif
+
+#define GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng) \
+ do { \
+ __out(ebuf, " i915-Eng-Name: %s command stream\n", \
+ (eng)->name); \
+ __out(ebuf, " i915-Eng-Inst-Class: 0x%02x\n", (eng)->class); \
+ __out(ebuf, " i915-Eng-Inst-Id: 0x%02x\n", (eng)->instance); \
+ __out(ebuf, " i915-Eng-LogicalMask: 0x%08x\n", \
+ (eng)->logical_mask); \
+ } while (0)
+
+#define GCAP_PRINT_GUC_INST_INFO(ebuf, node) \
+ do { \
+ __out(ebuf, " GuC-Engine-Inst-Id: 0x%08x\n", \
+ (node)->eng_inst); \
+ __out(ebuf, " GuC-Context-Id: 0x%08x\n", (node)->guc_id); \
+ __out(ebuf, " LRCA: 0x%08x\n", (node)->lrca); \
+ } while (0)
+
+int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
+ const struct intel_engine_coredump *ee)
+{
+ const char *grptype[GUC_STATE_CAPTURE_GROUP_TYPE_MAX] = {
+ "full-capture",
+ "partial-capture"
+ };
+ const char *datatype[GUC_CAPTURE_LIST_TYPE_MAX] = {
+ "Global",
+ "Engine-Class",
+ "Engine-Instance"
+ };
+ struct intel_guc_state_capture *cap;
+ struct __guc_capture_parsed_output *node;
+ struct intel_engine_cs *eng;
+ struct guc_mmio_reg *regs;
+ struct intel_guc *guc;
+ const char *str;
+ int numregs, i, j;
+ u32 is_ext;
+
+ if (!ebuf || !ee)
+ return -EINVAL;
+ cap = ee->capture;
+ if (!cap || !ee->engine)
+ return -ENODEV;
+
+ guc = &ee->engine->gt->uc.guc;
+
+ __out(ebuf, "global --- GuC Error Capture on %s command stream:\n",
+ ee->engine->name);
+
+ node = ee->guc_capture_node;
+ if (!node) {
+ __out(ebuf, " No matching ee-node\n");
+ return 0;
+ }
+
+ __out(ebuf, "Coverage: %s\n", grptype[node->is_partial]);
+
+ for (i = GUC_CAPTURE_LIST_TYPE_GLOBAL; i < GUC_CAPTURE_LIST_TYPE_MAX; ++i) {
+ __out(ebuf, " RegListType: %s\n",
+ datatype[i % GUC_CAPTURE_LIST_TYPE_MAX]);
+ __out(ebuf, " Owner-Id: %d\n", node->reginfo[i].vfid);
+
+ switch (i) {
+ case GUC_CAPTURE_LIST_TYPE_GLOBAL:
+ default:
+ break;
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
+ __out(ebuf, " GuC-Eng-Class: %d\n", node->eng_class);
+ __out(ebuf, " i915-Eng-Class: %d\n",
+ guc_class_to_engine_class(node->eng_class));
+ break;
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
+ eng = intel_guc_lookup_engine(guc, node->eng_class, node->eng_inst);
+ if (eng)
+ GCAP_PRINT_INTEL_ENG_INFO(ebuf, eng);
+ else
+ __out(ebuf, " i915-Eng-Lookup Fail!\n");
+ GCAP_PRINT_GUC_INST_INFO(ebuf, node);
+ break;
+ }
+
+ numregs = node->reginfo[i].num_regs;
+ __out(ebuf, " NumRegs: %d\n", numregs);
+ j = 0;
+ while (numregs--) {
+ regs = node->reginfo[i].regs;
+ str = guc_capture_reg_to_str(guc, GUC_CAPTURE_LIST_INDEX_PF, i,
+ node->eng_class, 0, regs[j].offset, &is_ext);
+ if (!str)
+ __out(ebuf, " REG-0x%08x", regs[j].offset);
+ else
+ __out(ebuf, " %s", str);
+ if (is_ext)
+ __out(ebuf, "[%ld][%ld]",
+ FIELD_GET(GUC_REGSET_STEERING_GROUP, regs[j].flags),
+ FIELD_GET(GUC_REGSET_STEERING_INSTANCE, regs[j].flags));
+ __out(ebuf, ": 0x%08x\n", regs[j].value);
+ ++j;
+ }
+ }
+ return 0;
+}
+
+#endif //CONFIG_DRM_I915_CAPTURE_ERROR
+
+void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
+{
+ if (!ee || !ee->guc_capture_node)
+ return;
+
+ guc_capture_add_node_to_cachelist(ee->capture, ee->guc_capture_node);
+ ee->capture = NULL;
+ ee->guc_capture_node = NULL;
+}
+
+void intel_guc_capture_get_matching_node(struct intel_gt *gt,
+ struct intel_engine_coredump *ee,
+ struct intel_context *ce)
+{
+ struct __guc_capture_parsed_output *n, *ntmp;
+ struct drm_i915_private *i915;
+ struct intel_guc *guc;
+
+ if (!gt || !ee || !ce)
+ return;
+
+ i915 = gt->i915;
+ guc = &gt->uc.guc;
+ if (!guc->capture)
+ return;
+
+ GEM_BUG_ON(ee->guc_capture_node);
+ /*
+ * Look for a matching GuC reported error capture node from
+ * the internal output link-list based on lrca, guc-id and engine
+ * identification.
+ */
+ list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
+ if (n->eng_inst == GUC_ID_TO_ENGINE_INSTANCE(ee->engine->guc_id) &&
+ n->eng_class == GUC_ID_TO_ENGINE_CLASS(ee->engine->guc_id) &&
+ n->guc_id && n->guc_id == ce->guc_id.id &&
+ (n->lrca & CTX_GTT_ADDRESS_MASK) && (n->lrca & CTX_GTT_ADDRESS_MASK) ==
+ (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
+ list_del(&n->link);
+ ee->guc_capture_node = n;
+ ee->capture = guc->capture;
+ return;
+ }
+ }
+ drm_dbg(&i915->drm, "GuC capture can't match ee to node\n");
+}
+
+void intel_guc_capture_process(struct intel_guc *guc)
+{
+ if (guc->capture)
+ __guc_capture_process_output(guc);
+}
+
+static void
+guc_capture_free_ads_cache(struct intel_guc_state_capture *gc)
+{
+ int i, j, k;
+ struct __guc_capture_ads_cache *cache;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
+ for (j = 0; j < GUC_CAPTURE_LIST_TYPE_MAX; ++j) {
+ for (k = 0; k < GUC_MAX_ENGINE_CLASSES; ++k) {
+ cache = &gc->ads_cache[i][j][k];
+ if (cache->is_valid)
+ kfree(cache->ptr);
+ }
+ }
+ }
+ kfree(gc->ads_null_cache);
+}
+
+void intel_guc_capture_destroy(struct intel_guc *guc)
+{
+ if (!guc->capture)
+ return;
+
+ guc_capture_free_ads_cache(guc->capture);
+
+ guc_capture_delete_prealloc_nodes(guc);
+
+ guc_capture_free_extlists(guc->capture->extlists);
+ kfree(guc->capture->extlists);
+
+ kfree(guc->capture);
+ guc->capture = NULL;
+}
+
+int intel_guc_capture_init(struct intel_guc *guc)
+{
+ guc->capture = kzalloc(sizeof(*guc->capture), GFP_KERNEL);
+ if (!guc->capture)
+ return -ENOMEM;
+
+ guc->capture->reglists = guc_capture_get_device_reglist(guc);
+
+ INIT_LIST_HEAD(&guc->capture->outlist);
+ INIT_LIST_HEAD(&guc->capture->cachelist);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
new file mode 100644
index 000000000000..d3d7bd0b6db6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021-2022 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_CAPTURE_H
+#define _INTEL_GUC_CAPTURE_H
+
+#include <linux/types.h>
+
+struct drm_i915_error_state_buf;
+struct guc_gt_system_info;
+struct intel_engine_coredump;
+struct intel_context;
+struct intel_gt;
+struct intel_guc;
+
+void intel_guc_capture_free_node(struct intel_engine_coredump *ee);
+int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *m,
+ const struct intel_engine_coredump *ee);
+void intel_guc_capture_get_matching_node(struct intel_gt *gt, struct intel_engine_coredump *ee,
+ struct intel_context *ce);
+void intel_guc_capture_process(struct intel_guc *guc);
+int intel_guc_capture_output_min_size_est(struct intel_guc *guc);
+int intel_guc_capture_getlist(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ void **outptr);
+int intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ size_t *size);
+int intel_guc_capture_getnullheader(struct intel_guc *guc, void **outptr, size_t *size);
+void intel_guc_capture_destroy(struct intel_guc *guc);
+int intel_guc_capture_init(struct intel_guc *guc);
+
+#endif /* _INTEL_GUC_CAPTURE_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 4b300b6cc0f9..42cb7a9a6199 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -32,8 +32,8 @@
#define GUC_CLIENT_PRIORITY_NORMAL 3
#define GUC_CLIENT_PRIORITY_NUM 4
-#define GUC_MAX_LRC_DESCRIPTORS 65535
-#define GUC_INVALID_LRC_ID GUC_MAX_LRC_DESCRIPTORS
+#define GUC_MAX_CONTEXT_ID 65535
+#define GUC_INVALID_CONTEXT_ID GUC_MAX_CONTEXT_ID
#define GUC_RENDER_ENGINE 0
#define GUC_VIDEO_ENGINE 1
@@ -98,7 +98,13 @@
#define GUC_LOG_BUF_ADDR_SHIFT 12
#define GUC_CTL_WA 1
-#define GUC_WA_POLLCS BIT(18)
+#define GUC_WA_GAM_CREDITS BIT(10)
+#define GUC_WA_DUAL_QUEUE BIT(11)
+#define GUC_WA_RCS_RESET_BEFORE_RC6 BIT(13)
+#define GUC_WA_CONTEXT_ISOLATION BIT(15)
+#define GUC_WA_PRE_PARSER BIT(14)
+#define GUC_WA_HOLD_CCS_SWITCHOUT BIT(17)
+#define GUC_WA_POLLCS BIT(18)
#define GUC_CTL_FEATURE 2
#define GUC_CTL_ENABLE_SLPC BIT(2)
@@ -197,54 +203,45 @@ struct guc_wq_item {
u32 fence_id;
} __packed;
-struct guc_process_desc {
- u32 stage_id;
- u64 db_base_addr;
+struct guc_sched_wq_desc {
u32 head;
u32 tail;
u32 error_offset;
- u64 wq_base_addr;
- u32 wq_size_bytes;
u32 wq_status;
- u32 engine_presence;
- u32 priority;
- u32 reserved[36];
+ u32 reserved[28];
} __packed;
+/* Helper for context registration H2G */
+struct guc_ctxt_registration_info {
+ u32 flags;
+ u32 context_idx;
+ u32 engine_class;
+ u32 engine_submit_mask;
+ u32 wq_desc_lo;
+ u32 wq_desc_hi;
+ u32 wq_base_lo;
+ u32 wq_base_hi;
+ u32 wq_size;
+ u32 hwlrca_lo;
+ u32 hwlrca_hi;
+};
#define CONTEXT_REGISTRATION_FLAG_KMD BIT(0)
-#define CONTEXT_POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000
-#define CONTEXT_POLICY_DEFAULT_PREEMPTION_TIME_US 500000
+/* 32-bit KLV structure as used by policy updates and others */
+struct guc_klv_generic_dw_t {
+ u32 kl;
+ u32 value;
+} __packed;
-/* Preempt to idle on quantum expiry */
-#define CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE BIT(0)
+/* Format of the UPDATE_CONTEXT_POLICIES H2G data packet */
+struct guc_update_context_policy_header {
+ u32 action;
+ u32 ctx_id;
+} __packed;
-/*
- * GuC Context registration descriptor.
- * FIXME: This is only required to exist during context registration.
- * The current 1:1 between guc_lrc_desc and LRCs for the lifetime of the LRC
- * is not required.
- */
-struct guc_lrc_desc {
- u32 hw_context_desc;
- u32 slpm_perf_mode_hint; /* SPLC v1 only */
- u32 slpm_freq_hint;
- u32 engine_submit_mask; /* In logical space */
- u8 engine_class;
- u8 reserved0[3];
- u32 priority;
- u32 process_desc;
- u32 wq_addr;
- u32 wq_size;
- u32 context_flags; /* CONTEXT_REGISTRATION_* */
- /* Time for one workload to execute. (in micro seconds) */
- u32 execution_quantum;
- /* Time to wait for a preemption request to complete before issuing a
- * reset. (in micro seconds).
- */
- u32 preemption_timeout;
- u32 policy_flags; /* CONTEXT_POLICY_* */
- u32 reserved1[19];
+struct guc_update_context_policy {
+ struct guc_update_context_policy_header header;
+ struct guc_klv_generic_dw_t klv[GUC_CONTEXT_POLICIES_KLV_NUM_IDS];
} __packed;
#define GUC_POWER_UNSPECIFIED 0
@@ -285,10 +282,13 @@ struct guc_mmio_reg {
u32 offset;
u32 value;
u32 flags;
- u32 mask;
#define GUC_REGSET_MASKED BIT(0)
+#define GUC_REGSET_NEEDS_STEERING BIT(1)
#define GUC_REGSET_MASKED_WITH_VALUE BIT(2)
#define GUC_REGSET_RESTORE_ONLY BIT(3)
+#define GUC_REGSET_STEERING_GROUP GENMASK(15, 12)
+#define GUC_REGSET_STEERING_INSTANCE GENMASK(23, 20)
+ u32 mask;
} __packed;
/* GuC register sets */
@@ -311,6 +311,14 @@ enum {
GUC_CAPTURE_LIST_INDEX_MAX = 2,
};
+/* Register-types of GuC capture register lists */
+enum guc_capture_type {
+ GUC_CAPTURE_LIST_TYPE_GLOBAL = 0,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
+ GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
+ GUC_CAPTURE_LIST_TYPE_MAX,
+};
+
/* GuC Additional Data Struct */
struct guc_ads {
struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
new file mode 100644
index 000000000000..79c66b6b51a3
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "gt/intel_gt.h"
+#include "gt/intel_hwconfig.h"
+#include "i915_drv.h"
+#include "i915_memcpy.h"
+
+/*
+ * GuC has a blob containing hardware configuration information (HWConfig).
+ * This is formatted as a simple and flexible KLV (Key/Length/Value) table.
+ *
+ * For example, a minimal version could be:
+ * enum device_attr {
+ * ATTR_SOME_VALUE = 0,
+ * ATTR_SOME_MASK = 1,
+ * };
+ *
+ * static const u32 hwconfig[] = {
+ * ATTR_SOME_VALUE,
+ * 1, // Value Length in DWords
+ * 8, // Value
+ *
+ * ATTR_SOME_MASK,
+ * 3,
+ * 0x00FFFFFFFF, 0xFFFFFFFF, 0xFF000000,
+ * };
+ *
+ * The attribute ids are defined in a hardware spec.
+ */
+
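+
A minimal sketch (assuming this KLV layout, with a hypothetical walker name) of how such a table could be iterated; this is illustrative only and not code from the patch:

/* Hypothetical sketch: iterate a KLV (Key/Length/Value) table like the one above. */
static void walk_hwconfig_klv(const u32 *blob, u32 num_dwords)
{
	u32 i = 0;

	while (i + 1 < num_dwords) {
		u32 key = blob[i++];
		u32 len = blob[i++]; /* value length in dwords */
		const u32 *value = &blob[i];

		if (i + len > num_dwords)
			break; /* malformed entry; stop parsing */

		/* consume (key, value[0..len-1]) here */
		(void)key;
		(void)value;

		i += len;
	}
}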
+static int __guc_action_get_hwconfig(struct intel_guc *guc,
+ u32 ggtt_offset, u32 ggtt_size)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_GET_HWCONFIG,
+ lower_32_bits(ggtt_offset),
+ upper_32_bits(ggtt_offset),
+ ggtt_size,
+ };
+ int ret;
+
+ ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
+ if (ret == -ENXIO)
+ return -ENOENT;
+
+ return ret;
+}
+
+static int guc_hwconfig_discover_size(struct intel_guc *guc, struct intel_hwconfig *hwconfig)
+{
+ int ret;
+
+ /*
+ * Sending a query with zero offset and size will return the
+ * size of the blob.
+ */
+ ret = __guc_action_get_hwconfig(guc, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0)
+ return -EINVAL;
+
+ hwconfig->size = ret;
+ return 0;
+}
+
+static int guc_hwconfig_fill_buffer(struct intel_guc *guc, struct intel_hwconfig *hwconfig)
+{
+ struct i915_vma *vma;
+ u32 ggtt_offset;
+ void *vaddr;
+ int ret;
+
+ GEM_BUG_ON(!hwconfig->size);
+
+ ret = intel_guc_allocate_and_map_vma(guc, hwconfig->size, &vma, &vaddr);
+ if (ret)
+ return ret;
+
+ ggtt_offset = intel_guc_ggtt_offset(guc, vma);
+
+ ret = __guc_action_get_hwconfig(guc, ggtt_offset, hwconfig->size);
+ if (ret >= 0)
+ memcpy(hwconfig->ptr, vaddr, hwconfig->size);
+
+ i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+
+ return ret;
+}
+
+static bool has_table(struct drm_i915_private *i915)
+{
+ if (IS_ALDERLAKE_P(i915))
+ return true;
+ if (IS_DG2(i915))
+ return true;
+
+ return false;
+}
+
+/**
+ * guc_hwconfig_init - Initialize the HWConfig
+ *
+ * Retrieve the HWConfig table from the GuC and save it locally.
+ * It can then be queried on demand by other users later on.
+ */
+static int guc_hwconfig_init(struct intel_gt *gt)
+{
+ struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
+ struct intel_guc *guc = &gt->uc.guc;
+ int ret;
+
+ if (!has_table(gt->i915))
+ return 0;
+
+ ret = guc_hwconfig_discover_size(guc, hwconfig);
+ if (ret)
+ return ret;
+
+ hwconfig->ptr = kmalloc(hwconfig->size, GFP_KERNEL);
+ if (!hwconfig->ptr) {
+ hwconfig->size = 0;
+ return -ENOMEM;
+ }
+
+ ret = guc_hwconfig_fill_buffer(guc, hwconfig);
+ if (ret < 0) {
+ intel_gt_fini_hwconfig(gt);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_gt_init_hwconfig - Initialize the HWConfig if available
+ *
+ * Retrieve the HWConfig table if available on the current platform.
+ */
+int intel_gt_init_hwconfig(struct intel_gt *gt)
+{
+ if (!intel_uc_uses_guc(&gt->uc))
+ return 0;
+
+ return guc_hwconfig_init(gt);
+}
+
+/**
+ * intel_gt_fini_hwconfig - Finalize the HWConfig
+ *
+ * Free up the memory allocation holding the table.
+ */
+void intel_gt_fini_hwconfig(struct intel_gt *gt)
+{
+ struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
+
+ kfree(hwconfig->ptr);
+ hwconfig->size = 0;
+ hwconfig->ptr = NULL;
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index a24dc6441872..78d2989fe917 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -10,9 +10,10 @@
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
+#include "intel_guc_capture.h"
#include "intel_guc_log.h"
-static void guc_log_capture_logs(struct intel_guc_log *log);
+static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log);
/**
* DOC: GuC firmware log
@@ -26,7 +27,8 @@ static void guc_log_capture_logs(struct intel_guc_log *log);
static int guc_action_flush_log_complete(struct intel_guc *guc)
{
u32 action[] = {
- INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
+ INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
+ GUC_DEBUG_LOG_BUFFER
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
@@ -137,7 +139,7 @@ static void guc_move_to_next_buf(struct intel_guc_log *log)
smp_wmb();
/* All data has been written, so now move the offset of sub buffer. */
- relay_reserve(log->relay.channel, log->vma->obj->base.size);
+ relay_reserve(log->relay.channel, log->vma->obj->base.size - CAPTURE_BUFFER_SIZE);
/* Switch to the next sub buffer */
relay_flush(log->relay.channel);
@@ -157,9 +159,9 @@ static void *guc_get_write_buffer(struct intel_guc_log *log)
return relay_reserve(log->relay.channel, 0);
}
-static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
- enum guc_log_buffer_type type,
- unsigned int full_cnt)
+bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log,
+ enum guc_log_buffer_type type,
+ unsigned int full_cnt)
{
unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
bool overflow = false;
@@ -182,7 +184,7 @@ static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
return overflow;
}
-static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
+unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
switch (type) {
case GUC_DEBUG_LOG_BUFFER:
@@ -198,7 +200,21 @@ static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
return 0;
}
-static void guc_read_update_log_buffer(struct intel_guc_log *log)
+size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type)
+{
+ enum guc_log_buffer_type i;
+ size_t offset = PAGE_SIZE; /* for the log_buffer_states */
+
+ for (i = GUC_DEBUG_LOG_BUFFER; i < GUC_MAX_LOG_BUFFER; ++i) {
+ if (i == type)
+ break;
+ offset += intel_guc_get_log_buffer_size(i);
+ }
+
+ return offset;
+}
+
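With the reordered layout (debug first, then crash dump, then capture), the helper above resolves to fixed offsets; a worked sketch, assuming the capture entry of enum guc_log_buffer_type is named GUC_CAPTURE_LOG_BUFFER:

	/*
	 * GUC_DEBUG_LOG_BUFFER      -> PAGE_SIZE
	 * GUC_CRASH_DUMP_LOG_BUFFER -> PAGE_SIZE + DEBUG_BUFFER_SIZE
	 * GUC_CAPTURE_LOG_BUFFER    -> PAGE_SIZE + DEBUG_BUFFER_SIZE +
	 *                              CRASH_BUFFER_SIZE
	 */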
+static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
@@ -213,7 +229,8 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
goto out_unlock;
/* Get the pointer to shared GuC log buffer */
- log_buf_state = src_data = log->relay.buf_addr;
+ src_data = log->buf_addr;
+ log_buf_state = src_data;
/* Get the pointer to local buffer to store the logs */
log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);
@@ -223,7 +240,7 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
 * Rate limited to avoid a deluge of messages; logs might be
 * getting consumed by the user at a slow rate.
*/
- DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
+ DRM_ERROR_RATELIMITED("no sub-buffer to copy general logs\n");
log->relay.full_count++;
goto out_unlock;
@@ -233,7 +250,8 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
src_data += PAGE_SIZE;
dst_data += PAGE_SIZE;
- for (type = GUC_DEBUG_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+ /* For relay logging, we exclude error state capture */
+ for (type = GUC_DEBUG_LOG_BUFFER; type <= GUC_CRASH_DUMP_LOG_BUFFER; type++) {
/*
* Make a copy of the state structure, inside GuC log buffer
* (which is uncached mapped), on the stack to avoid reading
@@ -241,14 +259,14 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
*/
memcpy(&log_buf_state_local, log_buf_state,
sizeof(struct guc_log_buffer_state));
- buffer_size = guc_get_log_buffer_size(type);
+ buffer_size = intel_guc_get_log_buffer_size(type);
read_offset = log_buf_state_local.read_ptr;
write_offset = log_buf_state_local.sampled_write_ptr;
full_cnt = log_buf_state_local.buffer_full_cnt;
/* Bookkeeping stuff */
log->stats[type].flush += log_buf_state_local.flush_to_file;
- new_overflow = guc_check_log_buf_overflow(log, type, full_cnt);
+ new_overflow = intel_guc_check_log_buf_overflow(log, type, full_cnt);
/* Update the state of shared log buffer */
log_buf_state->read_ptr = write_offset;
@@ -301,49 +319,43 @@ out_unlock:
mutex_unlock(&log->relay.lock);
}
-static void capture_logs_work(struct work_struct *work)
+static void copy_debug_logs_work(struct work_struct *work)
{
struct intel_guc_log *log =
container_of(work, struct intel_guc_log, relay.flush_work);
- guc_log_capture_logs(log);
+ guc_log_copy_debuglogs_for_relay(log);
}
-static int guc_log_map(struct intel_guc_log *log)
+static int guc_log_relay_map(struct intel_guc_log *log)
{
- void *vaddr;
-
lockdep_assert_held(&log->relay.lock);
- if (!log->vma)
+ if (!log->vma || !log->buf_addr)
return -ENODEV;
/*
- * Create a WC (Uncached for read) vmalloc mapping of log
- * buffer pages, so that we can directly get the data
- * (up-to-date) from memory.
+ * WC vmalloc mapping of log buffer pages was done at
+ * GuC Log Init time, but let's keep a ref for book-keeping
*/
- vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC);
- if (IS_ERR(vaddr))
- return PTR_ERR(vaddr);
-
- log->relay.buf_addr = vaddr;
+ i915_gem_object_get(log->vma->obj);
+ log->relay.buf_in_use = true;
return 0;
}
-static void guc_log_unmap(struct intel_guc_log *log)
+static void guc_log_relay_unmap(struct intel_guc_log *log)
{
lockdep_assert_held(&log->relay.lock);
- i915_gem_object_unpin_map(log->vma->obj);
- log->relay.buf_addr = NULL;
+ i915_gem_object_put(log->vma->obj);
+ log->relay.buf_in_use = false;
}
void intel_guc_log_init_early(struct intel_guc_log *log)
{
mutex_init(&log->relay.lock);
- INIT_WORK(&log->relay.flush_work, capture_logs_work);
+ INIT_WORK(&log->relay.flush_work, copy_debug_logs_work);
log->relay.started = false;
}
@@ -358,8 +370,11 @@ static int guc_log_relay_create(struct intel_guc_log *log)
lockdep_assert_held(&log->relay.lock);
GEM_BUG_ON(!log->vma);
- /* Keep the size of sub buffers same as shared log buffer */
- subbuf_size = log->vma->size;
+ /*
+ * Keep the size of sub buffers the same as the shared log buffer,
+ * minus the error-state-capture region, which is not relayed
+ */
+ subbuf_size = log->vma->size - CAPTURE_BUFFER_SIZE;
/*
* Store up to 8 snapshots, which is large enough to buffer sufficient
@@ -394,13 +409,13 @@ static void guc_log_relay_destroy(struct intel_guc_log *log)
log->relay.channel = NULL;
}
-static void guc_log_capture_logs(struct intel_guc_log *log)
+static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
- guc_read_update_log_buffer(log);
+ _guc_log_copy_debuglogs_for_relay(log);
/*
* Generally device is expected to be active only at this
@@ -440,6 +455,7 @@ int intel_guc_log_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct i915_vma *vma;
+ void *vaddr;
u32 guc_log_size;
int ret;
@@ -447,23 +463,28 @@ int intel_guc_log_create(struct intel_guc_log *log)
/*
* GuC Log buffer Layout
+ * (this ordering must follow "enum guc_log_buffer_type" definition)
*
* +===============================+ 00B
- * | Crash dump state header |
- * +-------------------------------+ 32B
* | Debug state header |
+ * +-------------------------------+ 32B
+ * | Crash dump state header |
* +-------------------------------+ 64B
* | Capture state header |
* +-------------------------------+ 96B
* | |
* +===============================+ PAGE_SIZE (4KB)
- * | Crash Dump logs |
- * +===============================+ + CRASH_SIZE
* | Debug logs |
* +===============================+ + DEBUG_SIZE
+ * | Crash Dump logs |
+ * +===============================+ + CRASH_SIZE
* | Capture logs |
* +===============================+ + CAPTURE_SIZE
*/
+ if (intel_guc_capture_output_min_size_est(guc) > CAPTURE_BUFFER_SIZE)
+ DRM_WARN("GuC log buffer for state_capture may be too small. %d < %d\n",
+ CAPTURE_BUFFER_SIZE, intel_guc_capture_output_min_size_est(guc));
+
guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE +
CAPTURE_BUFFER_SIZE;
@@ -474,6 +495,17 @@ int intel_guc_log_create(struct intel_guc_log *log)
}
log->vma = vma;
+ /*
+ * Create a WC (Uncached for read) vmalloc mapping up front for immediate access to
+ * data from memory during critical events such as error capture
+ */
+ vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ i915_vma_unpin_and_release(&log->vma, 0);
+ goto err;
+ }
+ log->buf_addr = vaddr;
log->level = __get_default_log_level(log);
DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
@@ -484,13 +516,14 @@ int intel_guc_log_create(struct intel_guc_log *log)
return 0;
err:
- DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret);
+ DRM_ERROR("Failed to allocate or map GuC log buffer. %d\n", ret);
return ret;
}
void intel_guc_log_destroy(struct intel_guc_log *log)
{
- i915_vma_unpin_and_release(&log->vma, 0);
+ log->buf_addr = NULL;
+ i915_vma_unpin_and_release(&log->vma, I915_VMA_RELEASE_MAP);
}
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
@@ -535,7 +568,7 @@ out_unlock:
bool intel_guc_log_relay_created(const struct intel_guc_log *log)
{
- return log->relay.buf_addr;
+ return log->buf_addr;
}
int intel_guc_log_relay_open(struct intel_guc_log *log)
@@ -566,7 +599,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
if (ret)
goto out_unlock;
- ret = guc_log_map(log);
+ ret = guc_log_relay_map(log);
if (ret)
goto out_relay;
@@ -616,8 +649,8 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
guc_action_flush_log(guc);
- /* GuC would have updated log buffer by now, so capture it */
- guc_log_capture_logs(log);
+ /* GuC would have updated log buffer by now, so copy it */
+ guc_log_copy_debuglogs_for_relay(log);
}
/*
@@ -646,7 +679,7 @@ void intel_guc_log_relay_close(struct intel_guc_log *log)
mutex_lock(&log->relay.lock);
GEM_BUG_ON(!intel_guc_log_relay_created(log));
- guc_log_unmap(log);
+ guc_log_relay_unmap(log);
guc_log_relay_destroy(log);
mutex_unlock(&log->relay.lock);
}
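Taken together, the relay rework gives the WC mapping the same lifetime as the log object itself, with relay open/close reduced to reference management; a summary sketch of the new lifetime:

	/*
	 * intel_guc_log_create()  -> pin_map(WC), log->buf_addr becomes valid
	 * guc_log_relay_map()     -> i915_gem_object_get() only
	 * guc_log_relay_unmap()   -> i915_gem_object_put()
	 * intel_guc_log_destroy() -> unpin_and_release(I915_VMA_RELEASE_MAP)
	 */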
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index d7e1b6471fed..18007e639be9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -49,8 +49,9 @@ struct intel_guc;
struct intel_guc_log {
u32 level;
struct i915_vma *vma;
+ void *buf_addr;
struct {
- void *buf_addr;
+ bool buf_in_use;
bool started;
struct work_struct flush_work;
struct rchan *channel;
@@ -66,6 +67,10 @@ struct intel_guc_log {
};
void intel_guc_log_init_early(struct intel_guc_log *log);
+bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log, enum guc_log_buffer_type type,
+ unsigned int full_cnt);
+unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type);
+size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type);
int intel_guc_log_create(struct intel_guc_log *log);
void intel_guc_log_destroy(struct intel_guc_log *log);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 9f032c65a488..1db833da42df 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -153,8 +153,8 @@ static int slpc_query_task_state(struct intel_guc_slpc *slpc)
ret = guc_action_slpc_query(guc, offset);
if (unlikely(ret))
- drm_err(&i915->drm, "Failed to query task state (%pe)\n",
- ERR_PTR(ret));
+ i915_probe_error(i915, "Failed to query task state (%pe)\n",
+ ERR_PTR(ret));
drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);
@@ -171,8 +171,8 @@ static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
ret = guc_action_slpc_set_param(guc, id, value);
if (ret)
- drm_err(&i915->drm, "Failed to set param %d to %u (%pe)\n",
- id, value, ERR_PTR(ret));
+ i915_probe_error(i915, "Failed to set param %d to %u (%pe)\n",
+ id, value, ERR_PTR(ret));
return ret;
}
@@ -212,8 +212,8 @@ static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
freq);
if (ret)
- drm_err(&i915->drm, "Unable to force min freq to %u: %d",
- freq, ret);
+ i915_probe_error(i915, "Unable to force min freq to %u: %d",
+ freq, ret);
}
return ret;
@@ -248,9 +248,9 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
if (unlikely(err)) {
- drm_err(&i915->drm,
- "Failed to allocate SLPC struct (err=%pe)\n",
- ERR_PTR(err));
+ i915_probe_error(i915,
+ "Failed to allocate SLPC struct (err=%pe)\n",
+ ERR_PTR(err));
return err;
}
@@ -317,15 +317,15 @@ static int slpc_reset(struct intel_guc_slpc *slpc)
ret = guc_action_slpc_reset(guc, offset);
if (unlikely(ret < 0)) {
- drm_err(&i915->drm, "SLPC reset action failed (%pe)\n",
- ERR_PTR(ret));
+ i915_probe_error(i915, "SLPC reset action failed (%pe)\n",
+ ERR_PTR(ret));
return ret;
}
if (!ret) {
if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
- drm_err(&i915->drm, "SLPC not enabled! State = %s\n",
- slpc_get_state_string(slpc));
+ i915_probe_error(i915, "SLPC not enabled! State = %s\n",
+ slpc_get_state_string(slpc));
return -EIO;
}
}
@@ -582,16 +582,12 @@ static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
{
struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
- u32 rp_state_cap;
+ struct intel_rps_freq_caps caps;
- rp_state_cap = intel_rps_read_state_cap(rps);
-
- slpc->rp0_freq = REG_FIELD_GET(RP0_CAP_MASK, rp_state_cap) *
- GT_FREQUENCY_MULTIPLIER;
- slpc->rp1_freq = REG_FIELD_GET(RP1_CAP_MASK, rp_state_cap) *
- GT_FREQUENCY_MULTIPLIER;
- slpc->min_freq = REG_FIELD_GET(RPN_CAP_MASK, rp_state_cap) *
- GT_FREQUENCY_MULTIPLIER;
+ gen6_rps_get_freq_caps(rps, &caps);
+ slpc->rp0_freq = intel_gpu_freq(rps, caps.rp0_freq);
+ slpc->rp1_freq = intel_gpu_freq(rps, caps.rp1_freq);
+ slpc->min_freq = intel_gpu_freq(rps, caps.min_freq);
if (!slpc->boost_freq)
slpc->boost_freq = slpc->rp0_freq;
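The caps fields returned by gen6_rps_get_freq_caps() are still in raw RP_STATE_CAP units; intel_gpu_freq() applies the platform scaling that the removed REG_FIELD_GET code multiplied by hand. Roughly, for gen9 and later (a sketch of one branch of the helper, not the full platform-specific logic):

	mhz = DIV_ROUND_CLOSEST(raw * GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);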
@@ -621,8 +617,8 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
ret = slpc_reset(slpc);
if (unlikely(ret < 0)) {
- drm_err(&i915->drm, "SLPC Reset event returned (%pe)\n",
- ERR_PTR(ret));
+ i915_probe_error(i915, "SLPC Reset event returned (%pe)\n",
+ ERR_PTR(ret));
return ret;
}
@@ -637,24 +633,24 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Ignore efficient freq and set min to platform min */
ret = slpc_ignore_eff_freq(slpc, true);
if (unlikely(ret)) {
- drm_err(&i915->drm, "Failed to set SLPC min to RPn (%pe)\n",
- ERR_PTR(ret));
+ i915_probe_error(i915, "Failed to set SLPC min to RPn (%pe)\n",
+ ERR_PTR(ret));
return ret;
}
/* Set SLPC max limit to RP0 */
ret = slpc_use_fused_rp0(slpc);
if (unlikely(ret)) {
- drm_err(&i915->drm, "Failed to set SLPC max to RP0 (%pe)\n",
- ERR_PTR(ret));
+ i915_probe_error(i915, "Failed to set SLPC max to RP0 (%pe)\n",
+ ERR_PTR(ret));
return ret;
}
/* Revert SLPC min/max to softlimits if necessary */
ret = slpc_set_softlimits(slpc);
if (unlikely(ret)) {
- drm_err(&i915->drm, "Failed to set SLPC softlimits (%pe)\n",
- ERR_PTR(ret));
+ i915_probe_error(i915, "Failed to set SLPC softlimits (%pe)\n",
+ ERR_PTR(ret));
return ret;
}
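The drm_err() to i915_probe_error() conversions above demote these messages to debug level when the failure was injected (as in CI fault-injection runs) and keep them at error level for real probe failures; the macro is roughly the following, paraphrased from i915_utils.h:

	#define i915_probe_error(i915, fmt, ...) \
		__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
			      fmt, ##__VA_ARGS__)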
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 1ce7e04aa837..61a6f2424e24 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -25,6 +25,7 @@
#include "gt/intel_ring.h"
#include "intel_guc_ads.h"
+#include "intel_guc_capture.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
@@ -161,7 +162,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
#define SCHED_STATE_ENABLED BIT(4)
#define SCHED_STATE_PENDING_ENABLE BIT(5)
#define SCHED_STATE_REGISTERED BIT(6)
-#define SCHED_STATE_BLOCKED_SHIFT 7
+#define SCHED_STATE_POLICY_REQUIRED BIT(7)
+#define SCHED_STATE_BLOCKED_SHIFT 8
#define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT)
#define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
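The BLOCKED shift moves from 7 to 8 to make room for the new flag; the resulting sched_state layout, sketched:

	/*
	 * bits [6:0]  pre-existing SCHED_STATE_* flags
	 * bit  [7]    SCHED_STATE_POLICY_REQUIRED (new)
	 * bits [19:8] SCHED_STATE_BLOCKED count (0xfff mask, previously [18:7])
	 */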
@@ -300,6 +302,23 @@ static inline void clr_context_registered(struct intel_context *ce)
ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
}
+static inline bool context_policy_required(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state & SCHED_STATE_POLICY_REQUIRED;
+}
+
+static inline void set_context_policy_required(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |= SCHED_STATE_POLICY_REQUIRED;
+}
+
+static inline void clr_context_policy_required(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= ~SCHED_STATE_POLICY_REQUIRED;
+}
+
static inline u32 context_blocked(struct intel_context *ce)
{
return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
@@ -351,12 +370,12 @@ request_to_scheduling_context(struct i915_request *rq)
static inline bool context_guc_id_invalid(struct intel_context *ce)
{
- return ce->guc_id.id == GUC_INVALID_LRC_ID;
+ return ce->guc_id.id == GUC_INVALID_CONTEXT_ID;
}
static inline void set_context_guc_id_invalid(struct intel_context *ce)
{
- ce->guc_id.id = GUC_INVALID_LRC_ID;
+ ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
}
static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
@@ -395,12 +414,12 @@ struct sync_semaphore {
};
struct parent_scratch {
- struct guc_process_desc pdesc;
+ struct guc_sched_wq_desc wq_desc;
struct sync_semaphore go;
struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
- u8 unused[WQ_OFFSET - sizeof(struct guc_process_desc) -
+ u8 unused[WQ_OFFSET - sizeof(struct guc_sched_wq_desc) -
sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
u32 wq[WQ_SIZE / sizeof(u32)];
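The renamed descriptor keeps the parent scratch geometry unchanged; sketched:

	/*
	 * 0 ........... guc_sched_wq_desc (was guc_process_desc)
	 * ............. go/join sync_semaphores
	 * ............. pad to WQ_OFFSET
	 * WQ_OFFSET ... wq[WQ_SIZE / sizeof(u32)]
	 */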
@@ -437,15 +456,15 @@ __get_parent_scratch(struct intel_context *ce)
LRC_STATE_OFFSET) / sizeof(u32)));
}
-static struct guc_process_desc *
-__get_process_desc(struct intel_context *ce)
+static struct guc_sched_wq_desc *
+__get_wq_desc(struct intel_context *ce)
{
struct parent_scratch *ps = __get_parent_scratch(ce);
- return &ps->pdesc;
+ return &ps->wq_desc;
}
-static u32 *get_wq_pointer(struct guc_process_desc *desc,
+static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc,
struct intel_context *ce,
u32 wqi_size)
{
@@ -457,7 +476,7 @@ static u32 *get_wq_pointer(struct guc_process_desc *desc,
#define AVAILABLE_SPACE \
CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
if (wqi_size > AVAILABLE_SPACE) {
- ce->parallel.guc.wqi_head = READ_ONCE(desc->head);
+ ce->parallel.guc.wqi_head = READ_ONCE(wq_desc->head);
if (wqi_size > AVAILABLE_SPACE)
return NULL;
@@ -467,75 +486,27 @@ static u32 *get_wq_pointer(struct guc_process_desc *desc,
return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)];
}
-static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index)
-{
- struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr;
-
- GEM_BUG_ON(index >= GUC_MAX_LRC_DESCRIPTORS);
-
- return &base[index];
-}
-
static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
{
struct intel_context *ce = xa_load(&guc->context_lookup, id);
- GEM_BUG_ON(id >= GUC_MAX_LRC_DESCRIPTORS);
+ GEM_BUG_ON(id >= GUC_MAX_CONTEXT_ID);
return ce;
}
-static int guc_lrc_desc_pool_create(struct intel_guc *guc)
-{
- u32 size;
- int ret;
-
- size = PAGE_ALIGN(sizeof(struct guc_lrc_desc) *
- GUC_MAX_LRC_DESCRIPTORS);
- ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool,
- (void **)&guc->lrc_desc_pool_vaddr);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void guc_lrc_desc_pool_destroy(struct intel_guc *guc)
-{
- guc->lrc_desc_pool_vaddr = NULL;
- i915_vma_unpin_and_release(&guc->lrc_desc_pool, I915_VMA_RELEASE_MAP);
-}
-
static inline bool guc_submission_initialized(struct intel_guc *guc)
{
- return !!guc->lrc_desc_pool_vaddr;
+ return guc->submission_initialized;
}
-static inline void reset_lrc_desc(struct intel_guc *guc, u32 id)
-{
- if (likely(guc_submission_initialized(guc))) {
- struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
- unsigned long flags;
-
- memset(desc, 0, sizeof(*desc));
-
- /*
- * xarray API doesn't have xa_erase_irqsave wrapper, so calling
- * the lower level functions directly.
- */
- xa_lock_irqsave(&guc->context_lookup, flags);
- __xa_erase(&guc->context_lookup, id);
- xa_unlock_irqrestore(&guc->context_lookup, flags);
- }
-}
-
-static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id)
+static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
{
return __get_context(guc, id);
}
-static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
- struct intel_context *ce)
+static inline void set_ctx_id_mapping(struct intel_guc *guc, u32 id,
+ struct intel_context *ce)
{
unsigned long flags;
@@ -548,6 +519,22 @@ static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
xa_unlock_irqrestore(&guc->context_lookup, flags);
}
+static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
+{
+ unsigned long flags;
+
+ if (unlikely(!guc_submission_initialized(guc)))
+ return;
+
+ /*
+ * xarray API doesn't have xa_erase_irqsave wrapper, so calling
+ * the lower level functions directly.
+ */
+ xa_lock_irqsave(&guc->context_lookup, flags);
+ __xa_erase(&guc->context_lookup, id);
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+}
+
static void decr_outstanding_submission_g2h(struct intel_guc *guc)
{
if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
@@ -624,7 +611,8 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
true, timeout);
}
-static int guc_lrc_desc_pin(struct intel_context *ce, bool loop);
+static int guc_context_policy_init(struct intel_context *ce, bool loop);
+static int try_context_registration(struct intel_context *ce, bool loop);
static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
@@ -650,6 +638,12 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
GEM_BUG_ON(context_guc_id_invalid(ce));
+ if (context_policy_required(ce)) {
+ err = guc_context_policy_init(ce, false);
+ if (err)
+ return err;
+ }
+
spin_lock(&ce->guc_state.lock);
/*
@@ -743,7 +737,7 @@ static u32 wq_space_until_wrap(struct intel_context *ce)
return (WQ_SIZE - ce->parallel.guc.wqi_tail);
}
-static void write_wqi(struct guc_process_desc *desc,
+static void write_wqi(struct guc_sched_wq_desc *wq_desc,
struct intel_context *ce,
u32 wqi_size)
{
@@ -756,13 +750,13 @@ static void write_wqi(struct guc_process_desc *desc,
ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
(WQ_SIZE - 1);
- WRITE_ONCE(desc->tail, ce->parallel.guc.wqi_tail);
+ WRITE_ONCE(wq_desc->tail, ce->parallel.guc.wqi_tail);
}
static int guc_wq_noop_append(struct intel_context *ce)
{
- struct guc_process_desc *desc = __get_process_desc(ce);
- u32 *wqi = get_wq_pointer(desc, ce, wq_space_until_wrap(ce));
+ struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
+ u32 *wqi = get_wq_pointer(wq_desc, ce, wq_space_until_wrap(ce));
u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
if (!wqi)
@@ -781,7 +775,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
{
struct intel_context *ce = request_to_scheduling_context(rq);
struct intel_context *child;
- struct guc_process_desc *desc = __get_process_desc(ce);
+ struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
unsigned int wqi_size = (ce->parallel.number_children + 4) *
sizeof(u32);
u32 *wqi;
@@ -792,7 +786,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
GEM_BUG_ON(context_guc_id_invalid(ce));
GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
- GEM_BUG_ON(!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id));
+ GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id));
/* Insert NOOP if this work queue item will wrap the tail pointer. */
if (wqi_size > wq_space_until_wrap(ce)) {
@@ -801,7 +795,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
return ret;
}
- wqi = get_wq_pointer(desc, ce, wqi_size);
+ wqi = get_wq_pointer(wq_desc, ce, wqi_size);
if (!wqi)
return -EBUSY;
@@ -816,7 +810,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
for_each_child(ce, child)
*wqi++ = child->ring->tail / sizeof(u64);
- write_wqi(desc, ce, wqi_size);
+ write_wqi(wq_desc, ce, wqi_size);
return 0;
}
@@ -920,9 +914,9 @@ register_context:
if (submit) {
struct intel_context *ce = request_to_scheduling_context(last);
- if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id) &&
+ if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
!intel_context_is_banned(ce))) {
- ret = guc_lrc_desc_pin(ce, false);
+ ret = try_context_registration(ce, false);
if (unlikely(ret == -EPIPE)) {
goto deadlk;
} else if (ret == -EBUSY) {
@@ -1546,6 +1540,89 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
lrc_update_regs(ce, engine, head);
}
+static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
+{
+ static const i915_reg_t _reg[I915_NUM_ENGINES] = {
+ [RCS0] = MSG_IDLE_CS,
+ [BCS0] = MSG_IDLE_BCS,
+ [VCS0] = MSG_IDLE_VCS0,
+ [VCS1] = MSG_IDLE_VCS1,
+ [VCS2] = MSG_IDLE_VCS2,
+ [VCS3] = MSG_IDLE_VCS3,
+ [VCS4] = MSG_IDLE_VCS4,
+ [VCS5] = MSG_IDLE_VCS5,
+ [VCS6] = MSG_IDLE_VCS6,
+ [VCS7] = MSG_IDLE_VCS7,
+ [VECS0] = MSG_IDLE_VECS0,
+ [VECS1] = MSG_IDLE_VECS1,
+ [VECS2] = MSG_IDLE_VECS2,
+ [VECS3] = MSG_IDLE_VECS3,
+ [CCS0] = MSG_IDLE_CS,
+ [CCS1] = MSG_IDLE_CS,
+ [CCS2] = MSG_IDLE_CS,
+ [CCS3] = MSG_IDLE_CS,
+ };
+ u32 val;
+
+ if (!_reg[engine->id].reg)
+ return 0;
+
+ val = intel_uncore_read(engine->uncore, _reg[engine->id]);
+
+ /* bits[29:25] & bits[13:9] >> shift */
+ return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT;
+}
+
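A worked example of the return expression above: the high half of MSG_IDLE carries the per-domain mask bits [29:25] and the low half the pending status bits [13:9], so shifting the value right by 16 aligns the two fields:

	/*
	 * e.g. mask bits[29:25] = 0b11111, status bits[13:9] = 0b00101:
	 *   (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT == 0b00101
	 * Only domains that are both enabled and pending survive the AND.
	 */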
+static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
+{
+ int ret;
+
+ /* Ensure GPM receives fw up/down after CS is stopped */
+ udelay(1);
+
+ /* Wait for forcewake request to complete in GPM */
+ ret = __intel_wait_for_register_fw(gt->uncore,
+ GEN9_PWRGT_DOMAIN_STATUS,
+ fw_mask, fw_mask, 5000, 0, NULL);
+
+ /* Ensure CS receives fw ack from GPM */
+ udelay(1);
+
+ if (ret)
+ GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret);
+}
+
+/*
+ * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any
+ * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
+ * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the
+ * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
+ * are concerned only with the gt reset here, we use a logical OR of pending
+ * forcewakeups from all reset domains and then wait for them to complete by
+ * querying PWRGT_DOMAIN_STATUS.
+ */
+static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
+{
+ u32 fw_pending;
+
+ if (GRAPHICS_VER(engine->i915) != 12)
+ return;
+
+ /*
+ * Wa_22011802037
+ * TODO: Occasionally trying to stop the cs times out, but does not
+ * adversely affect functionality. The timeout is set as a config
+ * parameter that defaults to 100ms. Assuming that this timeout is
+ * sufficient for any pending MI_FORCEWAKEs to complete, ignore the
+ * timeout returned here until it is root caused.
+ */
+ intel_engine_stop_cs(engine);
+
+ fw_pending = __cs_pending_mi_force_wakes(engine);
+ if (fw_pending)
+ __gpm_wait_for_fw_complete(engine->gt, fw_pending);
+}
+
static void guc_reset_nop(struct intel_engine_cs *engine)
{
}
@@ -1804,20 +1881,10 @@ static void reset_fail_worker_func(struct work_struct *w);
int intel_guc_submission_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- int ret;
- if (guc->lrc_desc_pool)
+ if (guc->submission_initialized)
return 0;
- ret = guc_lrc_desc_pool_create(guc);
- if (ret)
- return ret;
- /*
- * Keep static analysers happy, let them know that we allocated the
- * vma after testing that it didn't exist earlier.
- */
- GEM_BUG_ON(!guc->lrc_desc_pool);
-
guc->submission_state.guc_ids_bitmap =
bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
if (!guc->submission_state.guc_ids_bitmap)
@@ -1825,19 +1892,20 @@ int intel_guc_submission_init(struct intel_guc *guc)
guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
guc->timestamp.shift = gpm_timestamp_shift(gt);
+ guc->submission_initialized = true;
return 0;
}
void intel_guc_submission_fini(struct intel_guc *guc)
{
- if (!guc->lrc_desc_pool)
+ if (!guc->submission_initialized)
return;
guc_flush_destroyed_contexts(guc);
- guc_lrc_desc_pool_destroy(guc);
i915_sched_engine_put(guc->sched_engine);
bitmap_free(guc->submission_state.guc_ids_bitmap);
+ guc->submission_initialized = false;
}
static inline void queue_request(struct i915_sched_engine *sched_engine,
@@ -1884,7 +1952,7 @@ static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
return submission_disabled(guc) || guc->stalled_request ||
!i915_sched_engine_is_empty(sched_engine) ||
- !lrc_desc_registered(guc, ce->guc_id.id);
+ !ctx_id_mapped(guc, ce->guc_id.id);
}
static void guc_submit_request(struct i915_request *rq)
@@ -1941,7 +2009,7 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
else
ida_simple_remove(&guc->submission_state.guc_ids,
ce->guc_id.id);
- reset_lrc_desc(guc, ce->guc_id.id);
+ clr_ctx_id_mapping(guc, ce->guc_id.id);
set_context_guc_id_invalid(ce);
}
if (!list_empty(&ce->guc_id.link))
@@ -2094,65 +2162,96 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
static int __guc_action_register_multi_lrc(struct intel_guc *guc,
struct intel_context *ce,
- u32 guc_id,
- u32 offset,
+ struct guc_ctxt_registration_info *info,
bool loop)
{
struct intel_context *child;
- u32 action[4 + MAX_ENGINE_INSTANCE];
+ u32 action[13 + (MAX_ENGINE_INSTANCE * 2)];
int len = 0;
+ u32 next_id;
GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
- action[len++] = guc_id;
+ action[len++] = info->flags;
+ action[len++] = info->context_idx;
+ action[len++] = info->engine_class;
+ action[len++] = info->engine_submit_mask;
+ action[len++] = info->wq_desc_lo;
+ action[len++] = info->wq_desc_hi;
+ action[len++] = info->wq_base_lo;
+ action[len++] = info->wq_base_hi;
+ action[len++] = info->wq_size;
action[len++] = ce->parallel.number_children + 1;
- action[len++] = offset;
+ action[len++] = info->hwlrca_lo;
+ action[len++] = info->hwlrca_hi;
+
+ next_id = info->context_idx + 1;
for_each_child(ce, child) {
- offset += sizeof(struct guc_lrc_desc);
- action[len++] = offset;
+ GEM_BUG_ON(next_id++ != child->guc_id.id);
+
+ /*
+ * NB: GuC interface supports 64 bit LRCA even though i915/HW
+ * only supports 32 bit currently.
+ */
+ action[len++] = lower_32_bits(child->lrc.lrca);
+ action[len++] = upper_32_bits(child->lrc.lrca);
}
+ GEM_BUG_ON(len > ARRAY_SIZE(action));
+
return guc_submission_send_busy_loop(guc, action, len, 0, loop);
}
static int __guc_action_register_context(struct intel_guc *guc,
- u32 guc_id,
- u32 offset,
+ struct guc_ctxt_registration_info *info,
bool loop)
{
u32 action[] = {
INTEL_GUC_ACTION_REGISTER_CONTEXT,
- guc_id,
- offset,
+ info->flags,
+ info->context_idx,
+ info->engine_class,
+ info->engine_submit_mask,
+ info->wq_desc_lo,
+ info->wq_desc_hi,
+ info->wq_base_lo,
+ info->wq_base_hi,
+ info->wq_size,
+ info->hwlrca_lo,
+ info->hwlrca_hi,
};
return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
0, loop);
}
+static void prepare_context_registration_info(struct intel_context *ce,
+ struct guc_ctxt_registration_info *info);
+
static int register_context(struct intel_context *ce, bool loop)
{
+ struct guc_ctxt_registration_info info;
struct intel_guc *guc = ce_to_guc(ce);
- u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) +
- ce->guc_id.id * sizeof(struct guc_lrc_desc);
int ret;
GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_register(ce);
+ prepare_context_registration_info(ce, &info);
+
if (intel_context_is_parent(ce))
- ret = __guc_action_register_multi_lrc(guc, ce, ce->guc_id.id,
- offset, loop);
+ ret = __guc_action_register_multi_lrc(guc, ce, &info, loop);
else
- ret = __guc_action_register_context(guc, ce->guc_id.id, offset,
- loop);
+ ret = __guc_action_register_context(guc, &info, loop);
if (likely(!ret)) {
unsigned long flags;
spin_lock_irqsave(&ce->guc_state.lock, flags);
set_context_registered(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ guc_context_policy_init(ce, loop);
}
return ret;
@@ -2202,33 +2301,120 @@ static inline u32 get_children_join_value(struct intel_context *ce,
return __get_parent_scratch(ce)->join[child_index].semaphore;
}
-static void guc_context_policy_init(struct intel_engine_cs *engine,
- struct guc_lrc_desc *desc)
+struct context_policy {
+ u32 count;
+ struct guc_update_context_policy h2g;
+};
+
+static u32 __guc_context_policy_action_size(struct context_policy *policy)
{
- desc->policy_flags = 0;
+ size_t bytes = sizeof(policy->h2g.header) +
+ (sizeof(policy->h2g.klv[0]) * policy->count);
- if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
- desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE;
+ return bytes / sizeof(u32);
+}
+
+static void __guc_context_policy_start_klv(struct context_policy *policy, u16 guc_id)
+{
+ policy->h2g.header.action = INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES;
+ policy->h2g.header.ctx_id = guc_id;
+ policy->count = 0;
+}
+
+#define MAKE_CONTEXT_POLICY_ADD(func, id) \
+static void __guc_context_policy_add_##func(struct context_policy *policy, u32 data) \
+{ \
+ GEM_BUG_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
+ policy->h2g.klv[policy->count].kl = \
+ FIELD_PREP(GUC_KLV_0_KEY, GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
+ FIELD_PREP(GUC_KLV_0_LEN, 1); \
+ policy->h2g.klv[policy->count].value = data; \
+ policy->count++; \
+}
+
+MAKE_CONTEXT_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
+MAKE_CONTEXT_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
+MAKE_CONTEXT_POLICY_ADD(priority, SCHEDULING_PRIORITY)
+MAKE_CONTEXT_POLICY_ADD(preempt_to_idle, PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY)
+
+#undef MAKE_CONTEXT_POLICY_ADD
+
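Each MAKE_CONTEXT_POLICY_ADD() instantiation above generates one KLV append helper; for example, the execution_quantum instance expands to roughly:

	static void
	__guc_context_policy_add_execution_quantum(struct context_policy *policy,
						   u32 data)
	{
		GEM_BUG_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS);
		policy->h2g.klv[policy->count].kl =
			FIELD_PREP(GUC_KLV_0_KEY,
				   GUC_CONTEXT_POLICIES_KLV_ID_EXECUTION_QUANTUM) |
			FIELD_PREP(GUC_KLV_0_LEN, 1);
		policy->h2g.klv[policy->count].value = data;
		policy->count++;
	}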
+static int __guc_context_set_context_policies(struct intel_guc *guc,
+ struct context_policy *policy,
+ bool loop)
+{
+ return guc_submission_send_busy_loop(guc, (u32 *)&policy->h2g,
+ __guc_context_policy_action_size(policy),
+ 0, loop);
+}
+
+static int guc_context_policy_init(struct intel_context *ce, bool loop)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ struct context_policy policy;
+ u32 execution_quantum;
+ u32 preemption_timeout;
+ bool missing = false;
+ unsigned long flags;
+ int ret;
/* NB: For both of these, zero means disabled. */
- desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
- desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+ execution_quantum = engine->props.timeslice_duration_ms * 1000;
+ preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+
+ __guc_context_policy_start_klv(&policy, ce->guc_id.id);
+
+ __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
+ __guc_context_policy_add_execution_quantum(&policy, execution_quantum);
+ __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+
+ if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
+ __guc_context_policy_add_preempt_to_idle(&policy, 1);
+
+ ret = __guc_context_set_context_policies(guc, &policy, loop);
+ missing = ret != 0;
+
+ if (!missing && intel_context_is_parent(ce)) {
+ struct intel_context *child;
+
+ for_each_child(ce, child) {
+ __guc_context_policy_start_klv(&policy, child->guc_id.id);
+
+ if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
+ __guc_context_policy_add_preempt_to_idle(&policy, 1);
+
+ child->guc_state.prio = ce->guc_state.prio;
+ __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
+ __guc_context_policy_add_execution_quantum(&policy, execution_quantum);
+ __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+
+ ret = __guc_context_set_context_policies(guc, &policy, loop);
+ if (ret) {
+ missing = true;
+ break;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ if (missing)
+ set_context_policy_required(ce);
+ else
+ clr_context_policy_required(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ return ret;
}
-static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
+static void prepare_context_registration_info(struct intel_context *ce,
+ struct guc_ctxt_registration_info *info)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
struct intel_guc *guc = &engine->gt->uc.guc;
- u32 desc_idx = ce->guc_id.id;
- struct guc_lrc_desc *desc;
- bool context_registered;
- intel_wakeref_t wakeref;
- struct intel_context *child;
- int ret = 0;
+ u32 ctx_id = ce->guc_id.id;
GEM_BUG_ON(!engine->mask);
- GEM_BUG_ON(!sched_state_is_init(ce));
/*
 * Ensure LRC + CT vmas are in the same region as write barrier is done
@@ -2237,55 +2423,63 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
i915_gem_object_is_lmem(ce->ring->vma->obj));
- context_registered = lrc_desc_registered(guc, desc_idx);
-
- reset_lrc_desc(guc, desc_idx);
- set_lrc_desc_registered(guc, desc_idx, ce);
-
- desc = __get_lrc_desc(guc, desc_idx);
- desc->engine_class = engine_class_to_guc_class(engine->class);
- desc->engine_submit_mask = engine->logical_mask;
- desc->hw_context_desc = ce->lrc.lrca;
- desc->priority = ce->guc_state.prio;
- desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
- guc_context_policy_init(engine, desc);
+ memset(info, 0, sizeof(*info));
+ info->context_idx = ctx_id;
+ info->engine_class = engine_class_to_guc_class(engine->class);
+ info->engine_submit_mask = engine->logical_mask;
+ /*
+ * NB: GuC interface supports 64 bit LRCA even though i915/HW
+ * only supports 32 bit currently.
+ */
+ info->hwlrca_lo = lower_32_bits(ce->lrc.lrca);
+ info->hwlrca_hi = upper_32_bits(ce->lrc.lrca);
+ info->flags = CONTEXT_REGISTRATION_FLAG_KMD;
/*
 * If the context is a parent, we need to initialize the scheduling
 * work queue descriptor and register all child contexts.
*/
if (intel_context_is_parent(ce)) {
- struct guc_process_desc *pdesc;
+ struct guc_sched_wq_desc *wq_desc;
+ u64 wq_desc_offset, wq_base_offset;
ce->parallel.guc.wqi_tail = 0;
ce->parallel.guc.wqi_head = 0;
- desc->process_desc = i915_ggtt_offset(ce->state) +
- __get_parent_scratch_offset(ce);
- desc->wq_addr = i915_ggtt_offset(ce->state) +
- __get_wq_offset(ce);
- desc->wq_size = WQ_SIZE;
+ wq_desc_offset = i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce);
+ wq_base_offset = i915_ggtt_offset(ce->state) +
+ __get_wq_offset(ce);
+ info->wq_desc_lo = lower_32_bits(wq_desc_offset);
+ info->wq_desc_hi = upper_32_bits(wq_desc_offset);
+ info->wq_base_lo = lower_32_bits(wq_base_offset);
+ info->wq_base_hi = upper_32_bits(wq_base_offset);
+ info->wq_size = WQ_SIZE;
- pdesc = __get_process_desc(ce);
- memset(pdesc, 0, sizeof(*(pdesc)));
- pdesc->stage_id = ce->guc_id.id;
- pdesc->wq_base_addr = desc->wq_addr;
- pdesc->wq_size_bytes = desc->wq_size;
- pdesc->wq_status = WQ_STATUS_ACTIVE;
-
- for_each_child(ce, child) {
- desc = __get_lrc_desc(guc, child->guc_id.id);
-
- desc->engine_class =
- engine_class_to_guc_class(engine->class);
- desc->hw_context_desc = child->lrc.lrca;
- desc->priority = ce->guc_state.prio;
- desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
- guc_context_policy_init(engine, desc);
- }
+ wq_desc = __get_wq_desc(ce);
+ memset(wq_desc, 0, sizeof(*wq_desc));
+ wq_desc->wq_status = WQ_STATUS_ACTIVE;
clear_children_join_go_memory(ce);
}
+}
+
+static int try_context_registration(struct intel_context *ce, bool loop)
+{
+ struct intel_engine_cs *engine = ce->engine;
+ struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ intel_wakeref_t wakeref;
+ u32 ctx_id = ce->guc_id.id;
+ bool context_registered;
+ int ret = 0;
+
+ GEM_BUG_ON(!sched_state_is_init(ce));
+
+ context_registered = ctx_id_mapped(guc, ctx_id);
+
+ clr_ctx_id_mapping(guc, ctx_id);
+ set_ctx_id_mapping(guc, ctx_id, ce);
/*
* The context_lookup xarray is used to determine if the hardware
@@ -2311,7 +2505,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
}
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
if (unlikely(disabled)) {
- reset_lrc_desc(guc, desc_idx);
+ clr_ctx_id_mapping(guc, ctx_id);
return 0; /* Will get registered later */
}
@@ -2327,9 +2521,9 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
with_intel_runtime_pm(runtime_pm, wakeref)
ret = register_context(ce, loop);
if (unlikely(ret == -EBUSY)) {
- reset_lrc_desc(guc, desc_idx);
+ clr_ctx_id_mapping(guc, ctx_id);
} else if (unlikely(ret == -ENODEV)) {
- reset_lrc_desc(guc, desc_idx);
+ clr_ctx_id_mapping(guc, ctx_id);
ret = 0; /* Will get registered later */
}
}
@@ -2419,7 +2613,7 @@ static void __guc_context_sched_disable(struct intel_guc *guc,
GUC_CONTEXT_DISABLE
};
- GEM_BUG_ON(guc_id == GUC_INVALID_LRC_ID);
+ GEM_BUG_ON(guc_id == GUC_INVALID_CONTEXT_ID);
GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_sched_disable(ce);
@@ -2516,7 +2710,7 @@ static bool context_cant_unblock(struct intel_context *ce)
return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
context_guc_id_invalid(ce) ||
- !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id) ||
+ !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) ||
!intel_context_is_pinned(ce);
}
@@ -2580,13 +2774,11 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
u16 guc_id,
u32 preemption_timeout)
{
- u32 action[] = {
- INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT,
- guc_id,
- preemption_timeout
- };
+ struct context_policy policy;
- intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+ __guc_context_policy_start_klv(&policy, guc_id);
+ __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+ __guc_context_set_context_policies(guc, &policy, true);
}
static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
@@ -2686,7 +2878,7 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
bool disabled;
GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
- GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
+ GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
GEM_BUG_ON(context_enabled(ce));
@@ -2803,7 +2995,7 @@ static void guc_context_destroy(struct kref *kref)
*/
spin_lock_irqsave(&guc->submission_state.lock, flags);
destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
- !lrc_desc_registered(guc, ce->guc_id.id);
+ !ctx_id_mapped(guc, ce->guc_id.id);
if (likely(!destroy)) {
if (!list_empty(&ce->guc_id.link))
list_del_init(&ce->guc_id.link);
@@ -2831,16 +3023,20 @@ static int guc_context_alloc(struct intel_context *ce)
return lrc_alloc(ce, ce->engine);
}
+static void __guc_context_set_prio(struct intel_guc *guc,
+ struct intel_context *ce)
+{
+ struct context_policy policy;
+
+ __guc_context_policy_start_klv(&policy, ce->guc_id.id);
+ __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
+ __guc_context_set_context_policies(guc, &policy, true);
+}
+
static void guc_context_set_prio(struct intel_guc *guc,
struct intel_context *ce,
u8 prio)
{
- u32 action[] = {
- INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY,
- ce->guc_id.id,
- prio,
- };
-
GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
prio > GUC_CLIENT_PRIORITY_NORMAL);
lockdep_assert_held(&ce->guc_state.lock);
@@ -2851,9 +3047,9 @@ static void guc_context_set_prio(struct intel_guc *guc,
return;
}
- guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
-
ce->guc_state.prio = prio;
+ __guc_context_set_prio(guc, ce);
+
trace_intel_context_set_prio(ce);
}
@@ -3046,7 +3242,7 @@ static void guc_signal_context_fence(struct intel_context *ce)
static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
{
return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
- !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)) &&
+ !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) &&
!submission_disabled(ce_to_guc(ce));
}
@@ -3123,7 +3319,7 @@ static int guc_request_alloc(struct i915_request *rq)
if (unlikely(ret < 0))
return ret;
if (context_needs_register(ce, !!ret)) {
- ret = guc_lrc_desc_pin(ce, true);
+ ret = try_context_registration(ce, true);
if (unlikely(ret)) { /* unwind */
if (ret == -EPIPE) {
disable_submission(guc);
@@ -3560,7 +3756,7 @@ static void guc_sanitize(struct intel_engine_cs *engine)
sanitize_hwsp(engine);
/* And scrub the dirty cachelines for the HWSP */
- clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+ drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
intel_engine_reset_pinned_contexts(engine);
}
@@ -3595,7 +3791,7 @@ static int guc_resume(struct intel_engine_cs *engine)
setup_hwsp(engine);
start_engine(engine);
- if (engine->class == RENDER_CLASS)
+ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
xehp_enable_ccs_engines(engine);
return 0;
@@ -3614,9 +3810,17 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
static inline void guc_kernel_context_pin(struct intel_guc *guc,
struct intel_context *ce)
{
+ /*
+ * Note: we purposefully do not check the returns below because
+ * the registration can only fail if a reset is just starting.
+ * This is called at the end of reset so presumably another reset
+ * isn't happening and even if it did this code would be run again.
+ */
+
if (context_guc_id_invalid(ce))
pin_guc_id(guc, ce);
- guc_lrc_desc_pin(ce, true);
+
+ try_context_registration(ce, true);
}
static inline void guc_init_lrc_mapping(struct intel_guc *guc)
@@ -3634,13 +3838,7 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc)
 * Also, after a reset of the GuC we want to make sure that the
* information shared with GuC is properly reset. The kernel LRCs are
* not attached to the gem_context, so they need to be added separately.
- *
- * Note: we purposefully do not check the return of guc_lrc_desc_pin,
- * because that function can only fail if a reset is just starting. This
- * is at the end of reset so presumably another reset isn't happening
- * and even it did this code would be run again.
*/
-
for_each_engine(engine, gt, id) {
struct intel_context *ce;
@@ -3680,7 +3878,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
engine->sched_engine->schedule = i915_schedule;
- engine->reset.prepare = guc_reset_nop;
+ engine->reset.prepare = guc_engine_reset_prepare;
engine->reset.rewind = guc_rewind_nop;
engine->reset.cancel = guc_reset_nop;
engine->reset.finish = guc_reset_nop;
@@ -3699,6 +3897,10 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ /* Wa_14014475959:dg2 */
+ if (IS_DG2(engine->i915) && engine->class == COMPUTE_CLASS)
+ engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+
/*
* TODO: GuC supports timeslicing and semaphores as well, but they're
* handled by the firmware so some minor tweaks are required before
@@ -3835,32 +4037,32 @@ void intel_guc_submission_init_early(struct intel_guc *guc)
spin_lock_init(&guc->timestamp.lock);
INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
- guc->submission_state.num_guc_ids = GUC_MAX_LRC_DESCRIPTORS;
+ guc->submission_state.num_guc_ids = GUC_MAX_CONTEXT_ID;
guc->submission_supported = __guc_submission_supported(guc);
guc->submission_selected = __guc_submission_selected(guc);
}
static inline struct intel_context *
-g2h_context_lookup(struct intel_guc *guc, u32 desc_idx)
+g2h_context_lookup(struct intel_guc *guc, u32 ctx_id)
{
struct intel_context *ce;
- if (unlikely(desc_idx >= GUC_MAX_LRC_DESCRIPTORS)) {
+ if (unlikely(ctx_id >= GUC_MAX_CONTEXT_ID)) {
drm_err(&guc_to_gt(guc)->i915->drm,
- "Invalid desc_idx %u", desc_idx);
+ "Invalid ctx_id %u\n", ctx_id);
return NULL;
}
- ce = __get_context(guc, desc_idx);
+ ce = __get_context(guc, ctx_id);
if (unlikely(!ce)) {
drm_err(&guc_to_gt(guc)->i915->drm,
- "Context is NULL, desc_idx %u", desc_idx);
+ "Context is NULL, ctx_id %u\n", ctx_id);
return NULL;
}
if (unlikely(intel_context_is_child(ce))) {
drm_err(&guc_to_gt(guc)->i915->drm,
- "Context is child, desc_idx %u", desc_idx);
+ "Context is child, ctx_id %u\n", ctx_id);
return NULL;
}
@@ -3872,14 +4074,15 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
u32 len)
{
struct intel_context *ce;
- u32 desc_idx = msg[0];
+ u32 ctx_id;
if (unlikely(len < 1)) {
- drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+ drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len);
return -EPROTO;
}
+ ctx_id = msg[0];
- ce = g2h_context_lookup(guc, desc_idx);
+ ce = g2h_context_lookup(guc, ctx_id);
if (unlikely(!ce))
return -EPROTO;
@@ -3923,14 +4126,15 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
{
struct intel_context *ce;
unsigned long flags;
- u32 desc_idx = msg[0];
+ u32 ctx_id;
if (unlikely(len < 2)) {
- drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+ drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len);
return -EPROTO;
}
+ ctx_id = msg[0];
- ce = g2h_context_lookup(guc, desc_idx);
+ ce = g2h_context_lookup(guc, ctx_id);
if (unlikely(!ce))
return -EPROTO;
@@ -3938,8 +4142,8 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
(!context_pending_enable(ce) &&
!context_pending_disable(ce)))) {
drm_err(&guc_to_gt(guc)->i915->drm,
- "Bad context sched_state 0x%x, desc_idx %u",
- ce->guc_state.sched_state, desc_idx);
+ "Bad context sched_state 0x%x, ctx_id %u\n",
+ ce->guc_state.sched_state, ctx_id);
return -EPROTO;
}
@@ -4005,7 +4209,7 @@ static void capture_error_state(struct intel_guc *guc,
intel_engine_set_hung_context(engine, ce);
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- i915_capture_error_state(gt, engine->mask);
+ i915_capture_error_state(gt, engine->mask, CORE_DUMP_FLAG_IS_GUC_CAPTURE);
atomic_inc(&i915->gpu_error.reset_engine_count[engine->uabi_class]);
}
@@ -4037,14 +4241,14 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
{
struct intel_context *ce;
unsigned long flags;
- int desc_idx;
+ int ctx_id;
if (unlikely(len != 1)) {
drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
return -EPROTO;
}
- desc_idx = msg[0];
+ ctx_id = msg[0];
/*
* The context lookup uses the xarray but lookups only require an RCU lock
@@ -4053,7 +4257,7 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
* asynchronously until the reset is done.
*/
xa_lock_irqsave(&guc->context_lookup, flags);
- ce = g2h_context_lookup(guc, desc_idx);
+ ce = g2h_context_lookup(guc, ctx_id);
if (ce)
intel_context_get(ce);
xa_unlock_irqrestore(&guc->context_lookup, flags);
@@ -4070,23 +4274,24 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
int intel_guc_error_capture_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len)
{
- int status;
+ u32 status;
if (unlikely(len != 1)) {
drm_dbg(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
return -EPROTO;
}
- status = msg[0];
- drm_info(&guc_to_gt(guc)->i915->drm, "Got error capture: status = %d", status);
+ status = msg[0] & INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;
+ if (status == INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE)
+ drm_warn(&guc_to_gt(guc)->i915->drm, "G2H-Error capture no space");
- /* FIXME: Do something with the capture */
+ intel_guc_capture_process(guc);
return 0;
}
-static struct intel_engine_cs *
-guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
+struct intel_engine_cs *
+intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
{
struct intel_gt *gt = guc_to_gt(guc);
u8 engine_class = guc_class_to_engine_class(guc_class);
@@ -4135,7 +4340,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
instance = msg[1];
reason = msg[2];
- engine = guc_lookup_engine(guc, guc_class, instance);
+ engine = intel_guc_lookup_engine(guc, guc_class, instance);
if (unlikely(!engine)) {
drm_err(&gt->i915->drm,
"Invalid engine %d:%d", guc_class, instance);
@@ -4333,17 +4538,17 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
guc_log_context_priority(p, ce);
if (intel_context_is_parent(ce)) {
- struct guc_process_desc *desc = __get_process_desc(ce);
+ struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
struct intel_context *child;
drm_printf(p, "\t\tNumber children: %u\n",
ce->parallel.number_children);
drm_printf(p, "\t\tWQI Head: %u\n",
- READ_ONCE(desc->head));
+ READ_ONCE(wq_desc->head));
drm_printf(p, "\t\tWQI Tail: %u\n",
- READ_ONCE(desc->tail));
+ READ_ONCE(wq_desc->tail));
drm_printf(p, "\t\tWQI Status: %u\n\n",
- READ_ONCE(desc->wq_status));
+ READ_ONCE(wq_desc->wq_status));
if (ce->engine->emit_bb_start ==
emit_bb_start_parent_no_preempt_mid_batch) {
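Pulling this file's rework together, context registration now flows as follows (an illustrative call graph, not code from the patch):

	/*
	 * try_context_registration(ce, loop)
	 *   -> register_context(ce, loop)
	 *        -> prepare_context_registration_info(ce, &info)
	 *        -> __guc_action_register_context[_multi_lrc](guc, &info, loop)
	 *        -> guc_context_policy_init(ce, loop)   (on success; KLV H2G)
	 *
	 * If the policy H2G fails, SCHED_STATE_POLICY_REQUIRED is set and
	 * __guc_add_request() retries guc_context_policy_init() before new
	 * work is submitted on that context.
	 */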
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 541f180aaa29..a876d39e6bcf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -53,21 +53,21 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* firmware as TGL.
*/
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
- fw_def(ALDERLAKE_P, 0, guc_def(adlp, 69, 0, 3)) \
- fw_def(ALDERLAKE_S, 0, guc_def(tgl, 69, 0, 3)) \
- fw_def(DG1, 0, guc_def(dg1, 69, 0, 3)) \
- fw_def(ROCKETLAKE, 0, guc_def(tgl, 69, 0, 3)) \
- fw_def(TIGERLAKE, 0, guc_def(tgl, 69, 0, 3)) \
- fw_def(JASPERLAKE, 0, guc_def(ehl, 69, 0, 3)) \
- fw_def(ELKHARTLAKE, 0, guc_def(ehl, 69, 0, 3)) \
- fw_def(ICELAKE, 0, guc_def(icl, 69, 0, 3)) \
- fw_def(COMETLAKE, 5, guc_def(cml, 69, 0, 3)) \
- fw_def(COMETLAKE, 0, guc_def(kbl, 69, 0, 3)) \
- fw_def(COFFEELAKE, 0, guc_def(kbl, 69, 0, 3)) \
- fw_def(GEMINILAKE, 0, guc_def(glk, 69, 0, 3)) \
- fw_def(KABYLAKE, 0, guc_def(kbl, 69, 0, 3)) \
- fw_def(BROXTON, 0, guc_def(bxt, 69, 0, 3)) \
- fw_def(SKYLAKE, 0, guc_def(skl, 69, 0, 3))
+ fw_def(ALDERLAKE_P, 0, guc_def(adlp, 70, 1, 1)) \
+ fw_def(ALDERLAKE_S, 0, guc_def(tgl, 70, 1, 1)) \
+ fw_def(DG1, 0, guc_def(dg1, 70, 1, 1)) \
+ fw_def(ROCKETLAKE, 0, guc_def(tgl, 70, 1, 1)) \
+ fw_def(TIGERLAKE, 0, guc_def(tgl, 70, 1, 1)) \
+ fw_def(JASPERLAKE, 0, guc_def(ehl, 70, 1, 1)) \
+ fw_def(ELKHARTLAKE, 0, guc_def(ehl, 70, 1, 1)) \
+ fw_def(ICELAKE, 0, guc_def(icl, 70, 1, 1)) \
+ fw_def(COMETLAKE, 5, guc_def(cml, 70, 1, 1)) \
+ fw_def(COMETLAKE, 0, guc_def(kbl, 70, 1, 1)) \
+ fw_def(COFFEELAKE, 0, guc_def(kbl, 70, 1, 1)) \
+ fw_def(GEMINILAKE, 0, guc_def(glk, 70, 1, 1)) \
+ fw_def(KABYLAKE, 0, guc_def(kbl, 70, 1, 1)) \
+ fw_def(BROXTON, 0, guc_def(bxt, 70, 1, 1)) \
+ fw_def(SKYLAKE, 0, guc_def(skl, 70, 1, 1))
#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
fw_def(ALDERLAKE_P, 0, huc_def(tgl, 7, 9, 3)) \
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index a115894d5896..1df71d0796ae 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -148,7 +148,7 @@ static int intel_guc_steal_guc_ids(void *arg)
struct i915_request *spin_rq = NULL, *rq, *last = NULL;
int number_guc_id_stolen = guc->number_guc_id_stolen;
- ce = kzalloc(sizeof(*ce) * GUC_MAX_LRC_DESCRIPTORS, GFP_KERNEL);
+ ce = kcalloc(GUC_MAX_CONTEXT_ID, sizeof(*ce), GFP_KERNEL);
if (!ce) {
pr_err("Context array allocation failed\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 02239395ce81..94e5c29d2ee3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -309,7 +309,8 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
gpu = NULL;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES);
+ gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES, CORE_DUMP_FLAG_NONE);
+
if (IS_ERR(gpu))
return PTR_ERR(gpu);
@@ -582,8 +583,9 @@ static int i915_wedged_get(void *data, u64 *val)
static int i915_wedged_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
+ intel_gt_debugfs_reset_store(to_gt(i915), val);
- return intel_gt_debugfs_reset_store(to_gt(i915), val);
+ return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
@@ -731,15 +733,17 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
+ intel_gt_pm_debugfs_forcewake_user_open(to_gt(i915));
- return intel_gt_pm_debugfs_forcewake_user_open(to_gt(i915));
+ return 0;
}
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
+ intel_gt_pm_debugfs_forcewake_user_release(to_gt(i915));
- return intel_gt_pm_debugfs_forcewake_user_release(to_gt(i915));
+ return 0;
}
static const struct file_operations i915_forcewake_fops = {
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 09de45d3e274..3ffb617d75c9 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -77,6 +77,7 @@
#include "i915_file_private.h"
#include "i915_debugfs.h"
#include "i915_driver.h"
+#include "i915_drm_client.h"
#include "i915_drv.h"
#include "i915_getparam.h"
#include "i915_ioc32.h"
@@ -322,9 +323,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_device_info_subplatform_init(dev_priv);
intel_step_init(dev_priv);
- intel_gt_init_early(to_gt(dev_priv), dev_priv);
intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
- intel_uncore_init_early(&dev_priv->uncore, to_gt(dev_priv));
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
@@ -355,7 +354,9 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_wopcm_init_early(&dev_priv->wopcm);
- __intel_gt_init_early(to_gt(dev_priv), dev_priv);
+ intel_root_gt_init_early(dev_priv);
+
+ i915_drm_clients_init(&dev_priv->clients, dev_priv);
i915_gem_init_early(dev_priv);
@@ -376,7 +377,8 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
err_gem:
i915_gem_cleanup_early(dev_priv);
- intel_gt_driver_late_release(to_gt(dev_priv));
+ intel_gt_driver_late_release_all(dev_priv);
+ i915_drm_clients_fini(&dev_priv->clients);
intel_region_ttm_device_fini(dev_priv);
err_ttm:
vlv_suspend_cleanup(dev_priv);
@@ -395,7 +397,8 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
intel_irq_fini(dev_priv);
intel_power_domains_cleanup(dev_priv);
i915_gem_cleanup_early(dev_priv);
- intel_gt_driver_late_release(to_gt(dev_priv));
+ intel_gt_driver_late_release_all(dev_priv);
+ i915_drm_clients_fini(&dev_priv->clients);
intel_region_ttm_device_fini(dev_priv);
vlv_suspend_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
@@ -426,13 +429,9 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
if (ret < 0)
return ret;
- ret = intel_uncore_setup_mmio(&dev_priv->uncore);
- if (ret < 0)
- goto err_bridge;
-
ret = intel_uncore_init_mmio(&dev_priv->uncore);
if (ret)
- goto err_mmio;
+ return ret;
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev_priv);
@@ -450,9 +449,6 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
err_uncore:
intel_teardown_mchbar(dev_priv);
intel_uncore_fini_mmio(&dev_priv->uncore);
-err_mmio:
- intel_uncore_cleanup_mmio(&dev_priv->uncore);
-err_bridge:
pci_dev_put(dev_priv->bridge_dev);
return ret;
@@ -466,7 +462,6 @@ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
intel_teardown_mchbar(dev_priv);
intel_uncore_fini_mmio(&dev_priv->uncore);
- intel_uncore_cleanup_mmio(&dev_priv->uncore);
pci_dev_put(dev_priv->bridge_dev);
}
@@ -599,7 +594,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_ggtt;
- ret = intel_gt_probe_lmem(to_gt(dev_priv));
+ ret = intel_gt_tiles_init(dev_priv);
if (ret)
goto err_mem_regions;
@@ -850,10 +845,14 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
intel_vgpu_detect(i915);
- ret = i915_driver_mmio_probe(i915);
+ ret = intel_gt_probe_all(i915);
if (ret < 0)
goto out_runtime_pm_put;
+ ret = i915_driver_mmio_probe(i915);
+ if (ret < 0)
+ goto out_tiles_cleanup;
+
ret = i915_driver_hw_probe(i915);
if (ret < 0)
goto out_cleanup_mmio;
@@ -910,6 +909,8 @@ out_cleanup_hw:
i915_ggtt_driver_late_release(i915);
out_cleanup_mmio:
i915_driver_mmio_release(i915);
+out_tiles_cleanup:
+ intel_gt_release_all(i915);
out_runtime_pm_put:
enable_rpm_wakeref_asserts(&i915->runtime_pm);
i915_driver_late_release(i915);
@@ -1013,6 +1014,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
i915_gem_context_close(file);
+ i915_drm_client_put(file_priv->client);
kfree_rcu(file_priv, rcu);
@@ -1743,6 +1745,9 @@ static const struct file_operations i915_driver_fops = {
.read = drm_read,
.compat_ioctl = i915_ioc32_compat_ioctl,
.llseek = noop_llseek,
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = i915_drm_client_fdinfo,
+#endif
};
static int
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
new file mode 100644
index 000000000000..475a6f824cad
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <uapi/drm/i915_drm.h>
+
+#include <drm/drm_print.h>
+
+#include "gem/i915_gem_context.h"
+#include "i915_drm_client.h"
+#include "i915_file_private.h"
+#include "i915_gem.h"
+#include "i915_utils.h"
+
+void i915_drm_clients_init(struct i915_drm_clients *clients,
+ struct drm_i915_private *i915)
+{
+ clients->i915 = i915;
+ clients->next_id = 0;
+
+ xa_init_flags(&clients->xarray, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+}
+
+struct i915_drm_client *i915_drm_client_add(struct i915_drm_clients *clients)
+{
+ struct i915_drm_client *client;
+ struct xarray *xa = &clients->xarray;
+ int ret;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ xa_lock_irq(xa);
+ ret = __xa_alloc_cyclic(xa, &client->id, client, xa_limit_32b,
+ &clients->next_id, GFP_KERNEL);
+ xa_unlock_irq(xa);
+ if (ret < 0)
+ goto err;
+
+ kref_init(&client->kref);
+ spin_lock_init(&client->ctx_lock);
+ INIT_LIST_HEAD(&client->ctx_list);
+ client->clients = clients;
+
+ return client;
+
+err:
+ kfree(client);
+
+ return ERR_PTR(ret);
+}
+
+void __i915_drm_client_free(struct kref *kref)
+{
+ struct i915_drm_client *client =
+ container_of(kref, typeof(*client), kref);
+ struct xarray *xa = &client->clients->xarray;
+ unsigned long flags;
+
+ xa_lock_irqsave(xa, flags);
+ __xa_erase(xa, client->id);
+ xa_unlock_irqrestore(xa, flags);
+ kfree(client);
+}
+
+void i915_drm_clients_fini(struct i915_drm_clients *clients)
+{
+ GEM_BUG_ON(!xa_empty(&clients->xarray));
+ xa_destroy(&clients->xarray);
+}
+
+#ifdef CONFIG_PROC_FS
+static const char * const uabi_class_names[] = {
+ [I915_ENGINE_CLASS_RENDER] = "render",
+ [I915_ENGINE_CLASS_COPY] = "copy",
+ [I915_ENGINE_CLASS_VIDEO] = "video",
+ [I915_ENGINE_CLASS_VIDEO_ENHANCE] = "video-enhance",
+};
+
+static u64 busy_add(struct i915_gem_context *ctx, unsigned int class)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+ u64 total = 0;
+
+ for_each_gem_engine(ce, rcu_dereference(ctx->engines), it) {
+ if (ce->engine->uabi_class != class)
+ continue;
+
+ total += intel_context_get_total_runtime_ns(ce);
+ }
+
+ return total;
+}
+
+static void
+show_client_class(struct seq_file *m,
+ struct i915_drm_client *client,
+ unsigned int class)
+{
+ const struct list_head *list = &client->ctx_list;
+ u64 total = atomic64_read(&client->past_runtime[class]);
+ const unsigned int capacity =
+ client->clients->i915->engine_uabi_class_count[class];
+ struct i915_gem_context *ctx;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ctx, list, client_link)
+ total += busy_add(ctx, class);
+ rcu_read_unlock();
+
+ seq_printf(m, "drm-engine-%s:\t%llu ns\n",
+ uabi_class_names[class], total);
+
+ if (capacity > 1)
+ seq_printf(m, "drm-engine-capacity-%s:\t%u\n",
+ uabi_class_names[class],
+ capacity);
+}
+
+void i915_drm_client_fdinfo(struct seq_file *m, struct file *f)
+{
+ struct drm_file *file = f->private_data;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_private *i915 = file_priv->dev_priv;
+ struct i915_drm_client *client = file_priv->client;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ unsigned int i;
+
+ /*
+ * ******************************************************************
+ * For a description of the text output format, please see drm-usage-stats.rst!
+ * ******************************************************************
+ */
+
+ seq_printf(m, "drm-driver:\t%s\n", i915->drm.driver->name);
+ seq_printf(m, "drm-pdev:\t%04x:%02x:%02x.%d\n",
+ pci_domain_nr(pdev->bus), pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ seq_printf(m, "drm-client-id:\t%u\n", client->id);
+
+ /*
+ * Temporarily skip showing client engine information with GuC submission until
+ * fetching engine busyness is implemented in the GuC submission backend.
+ */
+ if (GRAPHICS_VER(i915) < 8 || intel_uc_uses_guc_submission(&i915->gt0.uc))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++)
+ show_client_class(m, client, i);
+}
+#endif
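[Editor's note] The new i915_drm_client_fdinfo() hook exposes per-client busyness through /proc/<pid>/fdinfo/<fd> as key:value pairs in the drm-usage-stats.rst format. A rough userspace sketch of consuming it; the drm-* key names mirror the seq_printf() calls above, while the device node and error handling are illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        char path[64], line[256];
        int fd = open("/dev/dri/card0", O_RDWR);
        FILE *f;

        if (fd < 0)
                return 1;

        snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
        f = fopen(path, "r");
        if (f) {
                while (fgets(line, sizeof(line), f))
                        /* drm-driver, drm-pdev, drm-client-id, drm-engine-* */
                        if (!strncmp(line, "drm-", 4))
                                fputs(line, stdout);
                fclose(f);
        }
        close(fd);
        return 0;
}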
diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h
new file mode 100644
index 000000000000..5f5b02b01ba0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_drm_client.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __I915_DRM_CLIENT_H__
+#define __I915_DRM_CLIENT_H__
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/xarray.h>
+
+#include "gt/intel_engine_types.h"
+
+#define I915_LAST_UABI_ENGINE_CLASS I915_ENGINE_CLASS_VIDEO_ENHANCE
+
+struct drm_i915_private;
+
+struct i915_drm_clients {
+ struct drm_i915_private *i915;
+
+ struct xarray xarray;
+ u32 next_id;
+};
+
+struct i915_drm_client {
+ struct kref kref;
+
+ unsigned int id;
+
+ spinlock_t ctx_lock; /* For add/remove from ctx_list. */
+ struct list_head ctx_list; /* List of contexts belonging to client. */
+
+ struct i915_drm_clients *clients;
+
+ /**
+ * @past_runtime: Accumulation of pphwsp runtimes from closed contexts.
+ */
+ atomic64_t past_runtime[I915_LAST_UABI_ENGINE_CLASS + 1];
+};
+
+void i915_drm_clients_init(struct i915_drm_clients *clients,
+ struct drm_i915_private *i915);
+
+static inline struct i915_drm_client *
+i915_drm_client_get(struct i915_drm_client *client)
+{
+ kref_get(&client->kref);
+ return client;
+}
+
+void __i915_drm_client_free(struct kref *kref);
+
+static inline void i915_drm_client_put(struct i915_drm_client *client)
+{
+ kref_put(&client->kref, __i915_drm_client_free);
+}
+
+struct i915_drm_client *i915_drm_client_add(struct i915_drm_clients *clients);
+
+#ifdef CONFIG_PROC_FS
+void i915_drm_client_fdinfo(struct seq_file *m, struct file *f);
+#endif
+
+void i915_drm_clients_fini(struct i915_drm_clients *clients);
+
+#endif /* !__I915_DRM_CLIENT_H__ */
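[Editor's note] The get/put helpers above wrap the usual kref lifecycle: anyone still reading the client's stats pins it, and the last put runs __i915_drm_client_free(), which erases the XArray slot before freeing. A minimal portable sketch of that ownership flow, using C11 atomics in place of struct kref:

#include <stdatomic.h>
#include <stdlib.h>

struct client {
        atomic_int refs;                /* plays the role of struct kref */
};

static struct client *client_create(void)
{
        struct client *c = calloc(1, sizeof(*c));

        if (c)
                atomic_init(&c->refs, 1);       /* like kref_init() */
        return c;
}

static struct client *client_get(struct client *c)
{
        atomic_fetch_add(&c->refs, 1);          /* like i915_drm_client_get() */
        return c;
}

static void client_put(struct client *c)
{
        /*
         * Last reference gone: release, as __i915_drm_client_free() does
         * (the real code also erases the XArray slot before freeing).
         */
        if (atomic_fetch_sub(&c->refs, 1) == 1)
                free(c);
}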
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ce2cd6491d6d..a6cf9716d6aa 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -61,6 +61,7 @@
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"
+#include "i915_drm_client.h"
#include "i915_gem.h"
#include "i915_gpu_error.h"
#include "i915_params.h"
@@ -500,6 +501,7 @@ struct drm_i915_private {
struct pci_dev *bridge_dev;
struct rb_root uabi_engines;
+ unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];
struct resource mch_res;
@@ -766,6 +768,14 @@ struct drm_i915_private {
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct intel_gt gt0;
+ /*
+ * i915->gt[0] == &i915->gt0
+ */
+#define I915_MAX_GT 4
+ struct intel_gt *gt[I915_MAX_GT];
+
+ struct kobject *sysfs_gt;
+
struct {
struct i915_gem_contexts {
spinlock_t lock; /* locks list */
@@ -810,6 +820,8 @@ struct drm_i915_private {
struct i915_pmu pmu;
+ struct i915_drm_clients clients;
+
struct i915_hdcp_comp_master *hdcp_master;
bool hdcp_comp_added;
@@ -1201,6 +1213,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
((gt)->info.engine_mask & \
GENMASK(first__ + count__ - 1, first__)) >> first__; \
})
+#define RCS_MASK(gt) \
+ ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
#define VDBOX_MASK(gt) \
ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
#define VEBOX_MASK(gt) \
@@ -1294,6 +1308,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_DMC(dev_priv) (INTEL_INFO(dev_priv)->display.has_dmc)
+#define HAS_HECI_PXP(dev_priv) \
+ (INTEL_INFO(dev_priv)->has_heci_pxp)
+
+#define HAS_HECI_GSCFI(dev_priv) \
+ (INTEL_INFO(dev_priv)->has_heci_gscfi)
+
+#define HAS_HECI_GSC(dev_priv) (HAS_HECI_PXP(dev_priv) || HAS_HECI_GSCFI(dev_priv))
+
#define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12)
#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
@@ -1363,6 +1385,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GUC_DEPRIVILEGE(dev_priv) \
(INTEL_INFO(dev_priv)->has_guc_deprivilege)
+#define HAS_PERCTX_PREEMPT_CTRL(i915) \
+ ((GRAPHICS_VER(i915) >= 9) && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
+
#define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
IS_ALDERLAKE_S(dev_priv))
diff --git a/drivers/gpu/drm/i915/i915_file_private.h b/drivers/gpu/drm/i915/i915_file_private.h
index fb16cc431b2a..f42877869692 100644
--- a/drivers/gpu/drm/i915/i915_file_private.h
+++ b/drivers/gpu/drm/i915/i915_file_private.h
@@ -12,6 +12,7 @@
struct drm_i915_private;
struct drm_file;
+struct i915_drm_client;
struct drm_i915_file_private {
struct drm_i915_private *dev_priv;
@@ -103,6 +104,8 @@ struct drm_i915_file_private {
/** ban_score: Accumulated score of all ctx bans and fast hangs. */
atomic_t ban_score;
unsigned long hang_timestamp;
+
+ struct i915_drm_client *client;
};
#endif /* __I915_FILE_PRIVATE_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2e10187cd0a0..702e5b89be22 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -118,6 +118,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags)
{
struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
+ bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
LIST_HEAD(still_in_list);
intel_wakeref_t wakeref;
struct i915_vma *vma;
@@ -142,8 +143,6 @@ try_again:
while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
struct i915_vma,
obj_link))) {
- struct i915_address_space *vm = vma->vm;
-
list_move_tail(&vma->obj_link, &still_in_list);
if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
continue;
@@ -153,40 +152,44 @@ try_again:
break;
}
+ /*
+ * Requiring the vm destructor to take the object lock
+ * before destroying a vma would let us eliminate the
+ * i915_vm_tryget() here, and with it the barrier logic
+ * at the end. That would be an easy fix, but sleeping
+ * locks in a kthread should generally be avoided.
+ */
ret = -EAGAIN;
- if (!i915_vm_tryopen(vm))
+ if (!i915_vm_tryget(vma->vm))
break;
- /* Prevent vma being freed by i915_vma_parked as we unbind */
- vma = __i915_vma_get(vma);
spin_unlock(&obj->vma.lock);
- if (vma) {
- bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
- ret = -EBUSY;
- if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
- assert_object_held(vma->obj);
- ret = i915_vma_unbind_async(vma, vm_trylock);
- }
+ /*
+ * Since i915_vma_parked() takes the object lock
+ * before vma destruction, it won't race us here
+ * and destroy the vma from under us.
+ */
- if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
- !i915_vma_is_active(vma))) {
- if (vm_trylock) {
- if (mutex_trylock(&vma->vm->mutex)) {
- ret = __i915_vma_unbind(vma);
- mutex_unlock(&vma->vm->mutex);
- } else {
- ret = -EBUSY;
- }
- } else {
- ret = i915_vma_unbind(vma);
+ ret = -EBUSY;
+ if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
+ assert_object_held(vma->obj);
+ ret = i915_vma_unbind_async(vma, vm_trylock);
+ }
+
+ if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
+ !i915_vma_is_active(vma))) {
+ if (vm_trylock) {
+ if (mutex_trylock(&vma->vm->mutex)) {
+ ret = __i915_vma_unbind(vma);
+ mutex_unlock(&vma->vm->mutex);
}
+ } else {
+ ret = i915_vma_unbind(vma);
}
-
- __i915_vma_put(vma);
}
- i915_vm_close(vm);
+ i915_vm_put(vma->vm);
spin_lock(&obj->vma.lock);
}
list_splice_init(&still_in_list, &obj->vma.list);
@@ -936,8 +939,19 @@ new_vma:
if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
return ERR_PTR(-ENOSPC);
+ /*
+ * If this misplaced vma is too big (i.e., at least
+ * half the size of the aperture) or hasn't been pinned
+ * mappable before, we ignore the misplacement when
+ * PIN_NONBLOCK is set in order to avoid the ping-pong
+ * issue described above. In other words, we try to
+ * avoid the costly operation of unbinding this vma
+ * from the GGTT and rebinding it back because there
+ * may not be enough space for this vma in the aperture.
+ */
if (flags & PIN_MAPPABLE &&
- vma->fence_size > ggtt->mappable_end / 2)
+ (vma->fence_size > ggtt->mappable_end / 2 ||
+ !i915_vma_is_map_and_fenceable(vma)))
return ERR_PTR(-ENOSPC);
}
@@ -1213,25 +1227,40 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
- int ret;
+ struct i915_drm_client *client;
+ int ret = -ENOMEM;
DRM_DEBUG("\n");
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
- return -ENOMEM;
+ goto err_alloc;
+
+ client = i915_drm_client_add(&i915->clients);
+ if (IS_ERR(client)) {
+ ret = PTR_ERR(client);
+ goto err_client;
+ }
file->driver_priv = file_priv;
file_priv->dev_priv = i915;
file_priv->file = file;
+ file_priv->client = client;
file_priv->bsd_engine = -1;
file_priv->hang_timestamp = jiffies;
ret = i915_gem_context_open(i915, file);
if (ret)
- kfree(file_priv);
+ goto err_context;
+
+ return 0;
+err_context:
+ i915_drm_client_put(client);
+err_client:
+ kfree(file_priv);
+err_alloc:
return ret;
}
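[Editor's note] The reworked i915_gem_open() above adopts the canonical goto-unwind ladder: each failure jumps to a label that undoes exactly the steps that already succeeded, in reverse order. A stripped-down sketch of the idiom, with placeholder allocations:

#include <stdlib.h>

/* Each failure jumps to a label that unwinds only what already succeeded. */
static int open_example(void **out_priv, void **out_client)
{
        void *priv, *client;
        int ret = -1;

        priv = calloc(1, 32);
        if (!priv)
                goto err_alloc;

        client = calloc(1, 32);
        if (!client)
                goto err_client;

        *out_priv = priv;               /* ownership passes to the caller */
        *out_client = client;
        return 0;

err_client:
        free(priv);
err_alloc:
        return ret;
}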
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d0e7ee7b07df..0512c66fa4f3 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -48,6 +48,7 @@
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
+#include "gt/uc/intel_guc_capture.h"
#include "i915_driver.h"
#include "i915_drv.h"
@@ -511,13 +512,10 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct i915_gem_context_coredump *ctx)
{
- const u32 period = to_gt(m->i915)->clock_period_ns;
-
err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
ctx->guilty, ctx->active,
- ctx->total_runtime * period,
- mul_u32_u32(ctx->avg_runtime, period));
+ ctx->total_runtime, ctx->avg_runtime);
}
static struct i915_vma_coredump *
@@ -532,8 +530,8 @@ __find_vma(struct i915_vma_coredump *vma, const char *name)
return NULL;
}
-static struct i915_vma_coredump *
-find_batch(const struct intel_engine_coredump *ee)
+struct i915_vma_coredump *
+intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
{
return __find_vma(ee->vma, "batch");
}
@@ -561,7 +559,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
error_print_instdone(m, ee);
- batch = find_batch(ee);
+ batch = intel_gpu_error_find_batch(ee);
if (batch) {
u64 start = batch->gtt_offset;
u64 end = start + batch->gtt_size;
@@ -596,15 +594,11 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
ee->vm_info.pp_dir_base);
}
}
- err_printf(m, " hung: %u\n", ee->hung);
- err_printf(m, " engine reset count: %u\n", ee->reset_count);
for (n = 0; n < ee->num_ports; n++) {
err_printf(m, " ELSP[%d]:", n);
error_print_request(m, " ", &ee->execlist[n]);
}
-
- error_print_context(m, " Active context: ", &ee->context);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -616,9 +610,9 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
-static void print_error_vma(struct drm_i915_error_state_buf *m,
- const struct intel_engine_cs *engine,
- const struct i915_vma_coredump *vma)
+void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
+ const struct intel_engine_cs *engine,
+ const struct i915_vma_coredump *vma)
{
char out[ASCII85_BUFSZ];
struct page *page;
@@ -687,7 +681,7 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
intel_uc_fw_dump(&error_uc->guc_fw, &p);
intel_uc_fw_dump(&error_uc->huc_fw, &p);
- print_error_vma(m, NULL, error_uc->guc_log);
+ intel_gpu_error_print_vma(m, NULL, error_uc->guc_log);
}
static void err_free_sgl(struct scatterlist *sgl)
@@ -713,26 +707,33 @@ static void err_print_gt_info(struct drm_i915_error_state_buf *m,
struct drm_printer p = i915_error_printer(m);
intel_gt_info_print(&gt->info, &p);
- intel_sseu_print_topology(&gt->info.sseu, &p);
+ intel_sseu_print_topology(gt->_gt->i915, &gt->info.sseu, &p);
}
-static void err_print_gt(struct drm_i915_error_state_buf *m,
- struct intel_gt_coredump *gt)
+static void err_print_gt_display(struct drm_i915_error_state_buf *m,
+ struct intel_gt_coredump *gt)
+{
+ err_printf(m, "IER: 0x%08x\n", gt->ier);
+ err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
+}
+
+static void err_print_gt_global_nonguc(struct drm_i915_error_state_buf *m,
+ struct intel_gt_coredump *gt)
{
- const struct intel_engine_coredump *ee;
int i;
err_printf(m, "GT awake: %s\n", str_yes_no(gt->awake));
err_printf(m, "EIR: 0x%08x\n", gt->eir);
- err_printf(m, "IER: 0x%08x\n", gt->ier);
+ err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
+
for (i = 0; i < gt->ngtier; i++)
err_printf(m, "GTIER[%d]: 0x%08x\n", i, gt->gtier[i]);
- err_printf(m, "PGTBL_ER: 0x%08x\n", gt->pgtbl_er);
- err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);
- err_printf(m, "DERRMR: 0x%08x\n", gt->derrmr);
+}
- for (i = 0; i < gt->nfence; i++)
- err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]);
+static void err_print_gt_global(struct drm_i915_error_state_buf *m,
+ struct intel_gt_coredump *gt)
+{
+ err_printf(m, "FORCEWAKE: 0x%08x\n", gt->forcewake);
if (IS_GRAPHICS_VER(m->i915, 6, 11)) {
err_printf(m, "ERROR: 0x%08x\n", gt->error);
@@ -755,7 +756,7 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
if (GRAPHICS_VER(m->i915) >= 12) {
int i;
- for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+ for (i = 0; i < I915_MAX_SFC; i++) {
/*
* SFC_DONE resides in the VD forcewake domain, so it
* only exists if the corresponding VCS engine is
@@ -771,19 +772,38 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
err_printf(m, " GAM_DONE: 0x%08x\n", gt->gam_done);
}
+}
+
+static void err_print_gt_fences(struct drm_i915_error_state_buf *m,
+ struct intel_gt_coredump *gt)
+{
+ int i;
+
+ for (i = 0; i < gt->nfence; i++)
+ err_printf(m, " fence[%d] = %08llx\n", i, gt->fence[i]);
+}
+
+static void err_print_gt_engines(struct drm_i915_error_state_buf *m,
+ struct intel_gt_coredump *gt)
+{
+ const struct intel_engine_coredump *ee;
for (ee = gt->engine; ee; ee = ee->next) {
const struct i915_vma_coredump *vma;
- error_print_engine(m, ee);
+ if (ee->guc_capture_node)
+ intel_guc_capture_print_engine_node(m, ee);
+ else
+ error_print_engine(m, ee);
+
+ err_printf(m, " hung: %u\n", ee->hung);
+ err_printf(m, " engine reset count: %u\n", ee->reset_count);
+ error_print_context(m, " Active context: ", &ee->context);
+
for (vma = ee->vma; vma; vma = vma->next)
- print_error_vma(m, ee->engine, vma);
+ intel_gpu_error_print_vma(m, ee->engine, vma);
}
- if (gt->uc)
- err_print_uc(m, gt->uc);
-
- err_print_gt_info(m, gt);
}
static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
@@ -831,8 +851,30 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "RPM wakelock: %s\n", str_yes_no(error->wakelock));
err_printf(m, "PM suspended: %s\n", str_yes_no(error->suspended));
- if (error->gt)
- err_print_gt(m, error->gt);
+ if (error->gt) {
+ bool print_guc_capture = false;
+
+ if (error->gt->uc && error->gt->uc->is_guc_capture)
+ print_guc_capture = true;
+
+ err_print_gt_display(m, error->gt);
+ err_print_gt_global_nonguc(m, error->gt);
+ err_print_gt_fences(m, error->gt);
+
+ /*
+ * GuC dumps the global, engine-class and engine-instance registers
+ * together as part of the engine state dump, so we print them in
+ * err_print_gt_engines().
+ */
+ if (!print_guc_capture)
+ err_print_gt_global(m, error->gt);
+
+ err_print_gt_engines(m, error->gt);
+
+ if (error->gt->uc)
+ err_print_uc(m, error->gt->uc);
+
+ err_print_gt_info(m, error->gt);
+ }
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
@@ -980,6 +1022,7 @@ static void cleanup_gt(struct intel_gt_coredump *gt)
gt->engine = ee->next;
i915_vma_coredump_free(ee->vma);
+ intel_guc_capture_free_node(ee);
kfree(ee);
}
@@ -1313,8 +1356,8 @@ static bool record_context(struct i915_gem_context_coredump *e,
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
- e->total_runtime = rq->context->runtime.total;
- e->avg_runtime = ewma_runtime_read(&rq->context->runtime.avg);
+ e->total_runtime = intel_context_get_total_runtime_ns(rq->context);
+ e->avg_runtime = intel_context_get_avg_runtime_ns(rq->context);
simulated = i915_gem_context_no_error_capture(ctx);
@@ -1431,7 +1474,7 @@ static void add_vma_coredump(struct intel_engine_coredump *ee,
}
struct intel_engine_coredump *
-intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags)
{
struct intel_engine_coredump *ee;
@@ -1441,8 +1484,10 @@ intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
ee->engine = engine;
- engine_record_registers(ee);
- engine_record_execlists(ee);
+ if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)) {
+ engine_record_registers(ee);
+ engine_record_execlists(ee);
+ }
return ee;
}
@@ -1506,7 +1551,8 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
static struct intel_engine_coredump *
capture_engine(struct intel_engine_cs *engine,
- struct i915_vma_compress *compress)
+ struct i915_vma_compress *compress,
+ u32 dump_flags)
{
struct intel_engine_capture_vma *capture = NULL;
struct intel_engine_coredump *ee;
@@ -1514,7 +1560,7 @@ capture_engine(struct intel_engine_cs *engine,
struct i915_request *rq = NULL;
unsigned long flags;
- ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL);
+ ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
if (!ee)
return NULL;
@@ -1547,6 +1593,8 @@ capture_engine(struct intel_engine_cs *engine,
i915_request_put(rq);
goto no_request_capture;
}
+ if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
+ intel_guc_capture_get_matching_node(engine->gt, ee, ce);
intel_engine_coredump_add_vma(ee, capture, compress);
i915_request_put(rq);
@@ -1561,7 +1609,8 @@ no_request_capture:
static void
gt_record_engines(struct intel_gt_coredump *gt,
intel_engine_mask_t engine_mask,
- struct i915_vma_compress *compress)
+ struct i915_vma_compress *compress,
+ u32 dump_flags)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1572,7 +1621,7 @@ gt_record_engines(struct intel_gt_coredump *gt,
/* Refill our page pool before entering atomic section */
pool_refill(&compress->pool, ALLOW_FAIL);
- ee = capture_engine(engine, compress);
+ ee = capture_engine(engine, compress, dump_flags);
if (!ee)
continue;
@@ -1580,6 +1629,8 @@ gt_record_engines(struct intel_gt_coredump *gt,
gt->simulated |= ee->simulated;
if (ee->simulated) {
+ if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
+ intel_guc_capture_free_node(ee);
kfree(ee);
continue;
}
@@ -1615,8 +1666,74 @@ gt_record_uc(struct intel_gt_coredump *gt,
return error_uc;
}
-/* Capture all registers which don't fit into another category. */
-static void gt_record_regs(struct intel_gt_coredump *gt)
+/* Capture display registers. */
+static void gt_record_display_regs(struct intel_gt_coredump *gt)
+{
+ struct intel_uncore *uncore = gt->_gt->uncore;
+ struct drm_i915_private *i915 = uncore->i915;
+
+ if (GRAPHICS_VER(i915) >= 6)
+ gt->derrmr = intel_uncore_read(uncore, DERRMR);
+
+ if (GRAPHICS_VER(i915) >= 8)
+ gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
+ else if (IS_VALLEYVIEW(i915))
+ gt->ier = intel_uncore_read(uncore, VLV_IER);
+ else if (HAS_PCH_SPLIT(i915))
+ gt->ier = intel_uncore_read(uncore, DEIER);
+ else if (GRAPHICS_VER(i915) == 2)
+ gt->ier = intel_uncore_read16(uncore, GEN2_IER);
+ else
+ gt->ier = intel_uncore_read(uncore, GEN2_IER);
+}
+
+/* Capture all other registers that GuC doesn't capture. */
+static void gt_record_global_nonguc_regs(struct intel_gt_coredump *gt)
+{
+ struct intel_uncore *uncore = gt->_gt->uncore;
+ struct drm_i915_private *i915 = uncore->i915;
+ int i;
+
+ if (IS_VALLEYVIEW(i915)) {
+ gt->gtier[0] = intel_uncore_read(uncore, GTIER);
+ gt->ngtier = 1;
+ } else if (GRAPHICS_VER(i915) >= 11) {
+ gt->gtier[0] =
+ intel_uncore_read(uncore,
+ GEN11_RENDER_COPY_INTR_ENABLE);
+ gt->gtier[1] =
+ intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
+ gt->gtier[2] =
+ intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
+ gt->gtier[3] =
+ intel_uncore_read(uncore,
+ GEN11_GPM_WGBOXPERF_INTR_ENABLE);
+ gt->gtier[4] =
+ intel_uncore_read(uncore,
+ GEN11_CRYPTO_RSVD_INTR_ENABLE);
+ gt->gtier[5] =
+ intel_uncore_read(uncore,
+ GEN11_GUNIT_CSME_INTR_ENABLE);
+ gt->ngtier = 6;
+ } else if (GRAPHICS_VER(i915) >= 8) {
+ for (i = 0; i < 4; i++)
+ gt->gtier[i] =
+ intel_uncore_read(uncore, GEN8_GT_IER(i));
+ gt->ngtier = 4;
+ } else if (HAS_PCH_SPLIT(i915)) {
+ gt->gtier[0] = intel_uncore_read(uncore, GTIER);
+ gt->ngtier = 1;
+ }
+
+ gt->eir = intel_uncore_read(uncore, EIR);
+ gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
+}
+
+/*
+ * Capture all registers that relate to workload submission.
+ * NOTE: In GuC submission, when GuC resets an engine, it can dump these for us.
+ */
+static void gt_record_global_regs(struct intel_gt_coredump *gt)
{
struct intel_uncore *uncore = gt->_gt->uncore;
struct drm_i915_private *i915 = uncore->i915;
@@ -1632,11 +1749,8 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
*/
/* 1: Registers specific to a single generation */
- if (IS_VALLEYVIEW(i915)) {
- gt->gtier[0] = intel_uncore_read(uncore, GTIER);
- gt->ier = intel_uncore_read(uncore, VLV_IER);
+ if (IS_VALLEYVIEW(i915))
gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
- }
if (GRAPHICS_VER(i915) == 7)
gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
@@ -1664,7 +1778,6 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
if (GRAPHICS_VER(i915) >= 6) {
- gt->derrmr = intel_uncore_read(uncore, DERRMR);
if (GRAPHICS_VER(i915) < 12) {
gt->error = intel_uncore_read(uncore, ERROR_GEN6);
gt->done_reg = intel_uncore_read(uncore, DONE_REG);
@@ -1684,7 +1797,7 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
gt->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG);
if (GRAPHICS_VER(i915) >= 12) {
- for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
+ for (i = 0; i < I915_MAX_SFC; i++) {
/*
* SFC_DONE resides in the VD forcewake domain, so it
* only exists if the corresponding VCS engine is
@@ -1700,44 +1813,6 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
gt->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE);
}
-
- /* 4: Everything else */
- if (GRAPHICS_VER(i915) >= 11) {
- gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
- gt->gtier[0] =
- intel_uncore_read(uncore,
- GEN11_RENDER_COPY_INTR_ENABLE);
- gt->gtier[1] =
- intel_uncore_read(uncore, GEN11_VCS_VECS_INTR_ENABLE);
- gt->gtier[2] =
- intel_uncore_read(uncore, GEN11_GUC_SG_INTR_ENABLE);
- gt->gtier[3] =
- intel_uncore_read(uncore,
- GEN11_GPM_WGBOXPERF_INTR_ENABLE);
- gt->gtier[4] =
- intel_uncore_read(uncore,
- GEN11_CRYPTO_RSVD_INTR_ENABLE);
- gt->gtier[5] =
- intel_uncore_read(uncore,
- GEN11_GUNIT_CSME_INTR_ENABLE);
- gt->ngtier = 6;
- } else if (GRAPHICS_VER(i915) >= 8) {
- gt->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER);
- for (i = 0; i < 4; i++)
- gt->gtier[i] =
- intel_uncore_read(uncore, GEN8_GT_IER(i));
- gt->ngtier = 4;
- } else if (HAS_PCH_SPLIT(i915)) {
- gt->ier = intel_uncore_read(uncore, DEIER);
- gt->gtier[0] = intel_uncore_read(uncore, GTIER);
- gt->ngtier = 1;
- } else if (GRAPHICS_VER(i915) == 2) {
- gt->ier = intel_uncore_read16(uncore, GEN2_IER);
- } else if (!IS_VALLEYVIEW(i915)) {
- gt->ier = intel_uncore_read(uncore, GEN2_IER);
- }
- gt->eir = intel_uncore_read(uncore, EIR);
- gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER);
}
static void gt_record_info(struct intel_gt_coredump *gt)
@@ -1849,7 +1924,7 @@ i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
struct intel_gt_coredump *
-intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
{
struct intel_gt_coredump *gc;
@@ -1860,7 +1935,21 @@ intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
gc->_gt = gt;
gc->awake = intel_gt_pm_is_awake(gt);
- gt_record_regs(gc);
+ gt_record_display_regs(gc);
+ gt_record_global_nonguc_regs(gc);
+
+ /*
+ * GuC captures the global, engine-class and engine-instance
+ * registers (which can change as part of engine state during
+ * execution) and reports all three groups together as a
+ * single set before an engine is reset due to a hung context.
+ * Thus, if GuC triggered the context reset, we retrieve the
+ * register values as part of gt_record_engines.
+ */
+ if (!(dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE))
+ gt_record_global_regs(gc);
+
gt_record_fences(gc);
return gc;
@@ -1894,7 +1983,7 @@ void i915_vma_capture_finish(struct intel_gt_coredump *gt,
}
static struct i915_gpu_coredump *
-__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
struct drm_i915_private *i915 = gt->i915;
struct i915_gpu_coredump *error;
@@ -1908,7 +1997,7 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
if (!error)
return ERR_PTR(-ENOMEM);
- error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL);
+ error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL, dump_flags);
if (error->gt) {
struct i915_vma_compress *compress;
@@ -1919,11 +2008,19 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
return ERR_PTR(-ENOMEM);
}
+ if (INTEL_INFO(i915)->has_gt_uc) {
+ error->gt->uc = gt_record_uc(error->gt, compress);
+ if (error->gt->uc) {
+ if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
+ error->gt->uc->is_guc_capture = true;
+ else
+ GEM_BUG_ON(error->gt->uc->is_guc_capture);
+ }
+ }
+
gt_record_info(error->gt);
- gt_record_engines(error->gt, engine_mask, compress);
+ gt_record_engines(error->gt, engine_mask, compress, dump_flags);
- if (INTEL_INFO(i915)->has_gt_uc)
- error->gt->uc = gt_record_uc(error->gt, compress);
i915_vma_capture_finish(error->gt, compress);
@@ -1936,7 +2033,7 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
}
struct i915_gpu_coredump *
-i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
static DEFINE_MUTEX(capture_mutex);
int ret = mutex_lock_interruptible(&capture_mutex);
@@ -1945,7 +2042,7 @@ i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
if (ret)
return ERR_PTR(ret);
- dump = __i915_gpu_coredump(gt, engine_mask);
+ dump = __i915_gpu_coredump(gt, engine_mask, dump_flags);
mutex_unlock(&capture_mutex);
return dump;
@@ -1992,11 +2089,11 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
* to pick up.
*/
void i915_capture_error_state(struct intel_gt *gt,
- intel_engine_mask_t engine_mask)
+ intel_engine_mask_t engine_mask, u32 dump_flags)
{
struct i915_gpu_coredump *error;
- error = i915_gpu_coredump(gt, engine_mask);
+ error = i915_gpu_coredump(gt, engine_mask, dump_flags);
if (IS_ERR(error)) {
cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
return;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 88ce3a08f555..a611abacd9c2 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -53,6 +53,8 @@ struct i915_request_coredump {
struct i915_sched_attr sched_attr;
};
+struct __guc_capture_parsed_output;
+
struct intel_engine_coredump {
const struct intel_engine_cs *engine;
@@ -84,11 +86,15 @@ struct intel_engine_coredump {
u32 rc_psmi; /* sleep state */
struct intel_instdone instdone;
+ /* GuC matched capture-lists info */
+ struct intel_guc_state_capture *capture;
+ struct __guc_capture_parsed_output *guc_capture_node;
+
struct i915_gem_context_coredump {
char comm[TASK_COMM_LEN];
u64 total_runtime;
- u32 avg_runtime;
+ u64 avg_runtime;
pid_t pid;
int active;
@@ -124,7 +130,6 @@ struct intel_gt_coredump {
u32 pgtbl_er;
u32 ier;
u32 gtier[6], ngtier;
- u32 derrmr;
u32 forcewake;
u32 error; /* gen6+ */
u32 err_int; /* gen7 */
@@ -137,9 +142,12 @@ struct intel_gt_coredump {
u32 gfx_mode;
u32 gtt_cache;
u32 aux_err; /* gen12 */
- u32 sfc_done[GEN12_SFC_DONE_MAX]; /* gen12 */
u32 gam_done; /* gen12 */
+ /* Display related */
+ u32 derrmr;
+ u32 sfc_done[I915_MAX_SFC]; /* gen12 */
+
u32 nfence;
u64 fence[I915_MAX_NUM_FENCES];
@@ -149,6 +157,7 @@ struct intel_gt_coredump {
struct intel_uc_fw guc_fw;
struct intel_uc_fw huc_fw;
struct i915_vma_coredump *guc_log;
+ bool is_guc_capture;
} *uc;
struct intel_gt_coredump *next;
@@ -221,24 +230,32 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}
+#define CORE_DUMP_FLAG_NONE 0x0
+#define CORE_DUMP_FLAG_IS_GUC_CAPTURE BIT(0)
+
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
+void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
+ const struct intel_engine_cs *engine,
+ const struct i915_vma_coredump *vma);
+struct i915_vma_coredump *
+intel_gpu_error_find_batch(const struct intel_engine_coredump *ee);
struct i915_gpu_coredump *i915_gpu_coredump(struct intel_gt *gt,
- intel_engine_mask_t engine_mask);
+ intel_engine_mask_t engine_mask, u32 dump_flags);
void i915_capture_error_state(struct intel_gt *gt,
- intel_engine_mask_t engine_mask);
+ intel_engine_mask_t engine_mask, u32 dump_flags);
struct i915_gpu_coredump *
i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp);
struct intel_gt_coredump *
-intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp);
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags);
struct intel_engine_coredump *
-intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp);
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags);
struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
@@ -288,7 +305,7 @@ i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
}
static inline void
-i915_capture_error_state(struct intel_gt *gt, intel_engine_mask_t engine_mask)
+i915_capture_error_state(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
}
@@ -299,13 +316,13 @@ i915_gpu_coredump_alloc(struct drm_i915_private *i915, gfp_t gfp)
}
static inline struct intel_gt_coredump *
-intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp)
+intel_gt_coredump_alloc(struct intel_gt *gt, gfp_t gfp, u32 dump_flags)
{
return NULL;
}
static inline struct intel_engine_coredump *
-intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
+intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_flags)
{
return NULL;
}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 5ad071e09301..701fbc98afa0 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -202,6 +202,9 @@ i915_param_named_unsafe(request_timeout_ms, uint, 0600,
"Default request/fence/batch buffer expiration timeout.");
#endif
+i915_param_named_unsafe(lmem_size, uint, 0400,
+ "Set the lmem size(in MiB) for each region. (default: 0, all memory)");
+
static __always_inline void _print_param(struct drm_printer *p,
const char *name,
const char *type,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index c779a6f85c7e..b5e7ea45d191 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -73,6 +73,7 @@ struct drm_printer;
param(int, enable_dpcd_backlight, -1, 0600) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, CONFIG_DRM_I915_REQUEST_TIMEOUT ? 0600 : 0) \
+ param(unsigned int, lmem_size, 0, 0400) \
/* leave bools at the end to not create holes */ \
param(bool, enable_hangcheck, true, 0600) \
param(bool, load_detect_test, false, 0600) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 9e077929ed67..38f7de778914 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -901,7 +901,8 @@ static const struct intel_device_info rkl_info = {
.has_llc = 0, \
.has_pxp = 0, \
.has_snoop = 1, \
- .is_dgfx = 1
+ .is_dgfx = 1, \
+ .has_heci_gscfi = 1
static const struct intel_device_info dg1_info = {
GEN12_FEATURES,
@@ -1050,6 +1051,7 @@ static const struct intel_device_info xehpsdv_info = {
.has_4tile = 1, \
.has_64k_pages = 1, \
.has_guc_deprivilege = 1, \
+ .has_heci_pxp = 1, \
.needs_compact_pt = 1, \
.platform_engine_mask = \
BIT(RCS0) | BIT(BCS0) | \
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 2dfbc22857a3..7584cec53d5d 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -9,6 +9,7 @@
#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
+#include "gt/intel_engine_user.h"
#include <uapi/drm/i915_drm.h>
static int copy_query_item(void *query_hdr, size_t query_sz,
@@ -28,36 +29,30 @@ static int copy_query_item(void *query_hdr, size_t query_sz,
return 0;
}
-static int query_topology_info(struct drm_i915_private *dev_priv,
- struct drm_i915_query_item *query_item)
+static int fill_topology_info(const struct sseu_dev_info *sseu,
+ struct drm_i915_query_item *query_item,
+ const u8 *subslice_mask)
{
- const struct sseu_dev_info *sseu = &to_gt(dev_priv)->info.sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
int ret;
- if (query_item->flags != 0)
- return -EINVAL;
+ BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
if (sseu->max_slices == 0)
return -ENODEV;
- BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask));
-
slice_length = sizeof(sseu->slice_mask);
subslice_length = sseu->max_slices * sseu->ss_stride;
eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride;
total_length = sizeof(topo) + slice_length + subslice_length +
eu_length;
- ret = copy_query_item(&topo, sizeof(topo), total_length,
- query_item);
+ ret = copy_query_item(&topo, sizeof(topo), total_length, query_item);
+
if (ret != 0)
return ret;
- if (topo.flags != 0)
- return -EINVAL;
-
memset(&topo, 0, sizeof(topo));
topo.max_slices = sseu->max_slices;
topo.max_subslices = sseu->max_subslices;
@@ -69,27 +64,64 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
topo.eu_stride = sseu->eu_stride;
if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
- &topo, sizeof(topo)))
+ &topo, sizeof(topo)))
return -EFAULT;
if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
- &sseu->slice_mask, slice_length))
+ &sseu->slice_mask, slice_length))
return -EFAULT;
if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
- sizeof(topo) + slice_length),
- sseu->subslice_mask, subslice_length))
+ sizeof(topo) + slice_length),
+ subslice_mask, subslice_length))
return -EFAULT;
if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
- sizeof(topo) +
- slice_length + subslice_length),
- sseu->eu_mask, eu_length))
+ sizeof(topo) +
+ slice_length + subslice_length),
+ sseu->eu_mask, eu_length))
return -EFAULT;
return total_length;
}
+static int query_topology_info(struct drm_i915_private *dev_priv,
+ struct drm_i915_query_item *query_item)
+{
+ const struct sseu_dev_info *sseu = &to_gt(dev_priv)->info.sseu;
+
+ if (query_item->flags != 0)
+ return -EINVAL;
+
+ return fill_topology_info(sseu, query_item, sseu->subslice_mask);
+}
+
+static int query_geometry_subslices(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item)
+{
+ const struct sseu_dev_info *sseu;
+ struct intel_engine_cs *engine;
+ struct i915_engine_class_instance classinstance;
+
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ return -ENODEV;
+
+ classinstance = *((struct i915_engine_class_instance *)&query_item->flags);
+
+ engine = intel_engine_lookup_user(i915, (u8)classinstance.engine_class,
+ (u8)classinstance.engine_instance);
+
+ if (!engine)
+ return -EINVAL;
+
+ if (engine->class != RENDER_CLASS)
+ return -EINVAL;
+
+ sseu = &engine->gt->info.sseu;
+
+ return fill_topology_info(sseu, query_item, sseu->geometry_subslice_mask);
+}
+
static int
query_engine_info(struct drm_i915_private *i915,
struct drm_i915_query_item *query_item)
@@ -479,12 +511,36 @@ static int query_memregion_info(struct drm_i915_private *i915,
return total_length;
}
+static int query_hwconfig_blob(struct drm_i915_private *i915,
+ struct drm_i915_query_item *query_item)
+{
+ struct intel_gt *gt = to_gt(i915);
+ struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
+
+ if (!hwconfig->size || !hwconfig->ptr)
+ return -ENODEV;
+
+ if (query_item->length == 0)
+ return hwconfig->size;
+
+ if (query_item->length < hwconfig->size)
+ return -EINVAL;
+
+ if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
+ hwconfig->ptr, hwconfig->size))
+ return -EFAULT;
+
+ return hwconfig->size;
+}
+
static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item) = {
query_topology_info,
query_engine_info,
query_perf_config,
query_memregion_info,
+ query_hwconfig_blob,
+ query_geometry_subslices,
};
int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
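[Editor's note] query_hwconfig_blob() implements the usual two-call uAPI contract: a query item with length 0 returns the blob size, then userspace allocates and repeats the call to receive the data. A hedged userspace sketch; DRM_I915_QUERY_HWCONFIG_BLOB is assumed to be the item id this series adds to the uAPI header:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <i915_drm.h>           /* libdrm copy of the uAPI header */

static void *read_hwconfig(int fd, int32_t *len)
{
        struct drm_i915_query_item item = {
                .query_id = DRM_I915_QUERY_HWCONFIG_BLOB,
        };
        struct drm_i915_query q = {
                .num_items = 1,
                .items_ptr = (uintptr_t)&item,
        };
        void *buf;

        /* Call 1: item.length == 0 asks the kernel for the blob size. */
        if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0)
                return NULL;

        buf = malloc(item.length);
        if (!buf)
                return NULL;

        /* Call 2: the kernel copies the blob into our buffer. */
        item.data_ptr = (uintptr_t)buf;
        if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length < 0) {
                free(buf);
                return NULL;
        }

        *len = item.length;
        return buf;
}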
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 51f46fe45c72..98bb53226d6b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -976,6 +976,10 @@
#define GEN12_COMPUTE2_RING_BASE 0x1e000
#define GEN12_COMPUTE3_RING_BASE 0x26000
#define BLT_RING_BASE 0x22000
+#define DG1_GSC_HECI1_BASE 0x00258000
+#define DG1_GSC_HECI2_BASE 0x00259000
+#define DG2_GSC_HECI1_BASE 0x00373000
+#define DG2_GSC_HECI2_BASE 0x00374000
@@ -1842,6 +1846,17 @@
#define GEN9_RP_STATE_LIMITS _MMIO(0x138148)
#define XEHPSDV_RP_STATE_CAP _MMIO(0x250014)
+#define GT0_PERF_LIMIT_REASONS _MMIO(0x1381a8)
+#define GT0_PERF_LIMIT_REASONS_MASK 0xde3
+#define PROCHOT_MASK REG_BIT(0)
+#define THERMAL_LIMIT_MASK REG_BIT(1)
+#define RATL_MASK REG_BIT(5)
+#define VR_THERMALERT_MASK REG_BIT(6)
+#define VR_TDC_MASK REG_BIT(7)
+#define POWER_LIMIT_4_MASK REG_BIT(8)
+#define POWER_LIMIT_1_MASK REG_BIT(10)
+#define POWER_LIMIT_2_MASK REG_BIT(11)
+
#define CHV_CLK_CTL1 _MMIO(0x101100)
#define VLV_CLK_CTL2 _MMIO(0x101104)
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
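[Editor's note] The REG_BIT() positions for the perf-limit fields were off by one relative to GT0_PERF_LIMIT_REASONS_MASK (0xde3) and are corrected above so that OR-ing all eight fields reproduces the mask; decoding throttle reasons is then a straight bit test per field. A sketch with the bit positions inlined (names follow the defines; the decoder itself is illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const struct {
        uint32_t bit;
        const char *name;
} limit_reasons[] = {
        { 1u << 0,  "PROCHOT" },
        { 1u << 1,  "thermal limit" },
        { 1u << 5,  "RATL" },
        { 1u << 6,  "VR thermalert" },
        { 1u << 7,  "VR TDC" },
        { 1u << 8,  "power limit 4" },
        { 1u << 10, "power limit 1" },
        { 1u << 11, "power limit 2" },
};      /* all bits together == 0xde3 == GT0_PERF_LIMIT_REASONS_MASK */

static void decode_limit_reasons(uint32_t reg)
{
        for (size_t i = 0; i < sizeof(limit_reasons) / sizeof(limit_reasons[0]); i++)
                if (reg & limit_reasons[i].bit)
                        printf("throttled by: %s\n", limit_reasons[i].name);
}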
@@ -4352,12 +4367,12 @@
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
#define _DSPAPOS 0x7018C /* reserved */
-#define DISP_POS_Y_MASK REG_GENMASK(31, 0)
+#define DISP_POS_Y_MASK REG_GENMASK(31, 16)
#define DISP_POS_Y(y) REG_FIELD_PREP(DISP_POS_Y_MASK, (y))
#define DISP_POS_X_MASK REG_GENMASK(15, 0)
#define DISP_POS_X(x) REG_FIELD_PREP(DISP_POS_X_MASK, (x))
#define _DSPASIZE 0x70190
-#define DISP_HEIGHT_MASK REG_GENMASK(31, 0)
+#define DISP_HEIGHT_MASK REG_GENMASK(31, 16)
#define DISP_HEIGHT(h) REG_FIELD_PREP(DISP_HEIGHT_MASK, (h))
#define DISP_WIDTH_MASK REG_GENMASK(15, 0)
#define DISP_WIDTH(w) REG_FIELD_PREP(DISP_WIDTH_MASK, (w))
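[Editor's note] The mask fixes above narrow DISP_POS_Y_MASK and DISP_HEIGHT_MASK to bits 31:16; with REG_GENMASK(31, 0) the Y/HEIGHT field would have clobbered the X/WIDTH field in bits 15:0 on REG_FIELD_PREP(). A standalone sketch of the pack/unpack arithmetic, with plain-C stand-ins for the REG_* helpers (__builtin_ctz is a GCC/Clang builtin):

#include <assert.h>
#include <stdint.h>

#define GENMASK32(h, l)         (((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (31 - (h))))
#define FIELD_PREP32(mask, val) (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET32(mask, reg)  (((reg) & (mask)) >> __builtin_ctz(mask))

#define POS_Y_MASK      GENMASK32(31, 16)       /* was (31, 0): overlapped X */
#define POS_X_MASK      GENMASK32(15, 0)

int main(void)
{
        uint32_t reg = FIELD_PREP32(POS_Y_MASK, 300) | FIELD_PREP32(POS_X_MASK, 640);

        assert(FIELD_GET32(POS_X_MASK, reg) == 640);    /* intact with the fixed mask */
        assert(FIELD_GET32(POS_Y_MASK, reg) == 300);
        return 0;
}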
@@ -5160,7 +5175,7 @@
#define _SEL_FETCH_PLANE_BASE_6_A 0x70940
#define _SEL_FETCH_PLANE_BASE_7_A 0x70960
#define _SEL_FETCH_PLANE_BASE_CUR_A 0x70880
-#define _SEL_FETCH_PLANE_BASE_1_B 0x70990
+#define _SEL_FETCH_PLANE_BASE_1_B 0x71890
#define _SEL_FETCH_PLANE_BASE_A(plane) _PICK(plane, \
_SEL_FETCH_PLANE_BASE_1_A, \
@@ -8460,6 +8475,9 @@ enum skl_power_gate {
#define SGGI_DIS REG_BIT(15)
#define SGR_DIS REG_BIT(13)
+#define XEHPSDV_TILE0_ADDR_RANGE _MMIO(0x4900)
+#define XEHPSDV_TILE_LMEM_RANGE_SHIFT 8
+
#define XEHPSDV_FLAT_CCS_BASE_ADDR _MMIO(0x4910)
#define XEHPSDV_CCS_BASE_SHIFT 8
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index d78d78fce431..8f486f77609f 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -123,6 +123,4 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VLV_DISPLAY_BASE 0x180000
-#define GEN12_SFC_DONE_MAX 4
-
#endif /* __I915_REG_DEFS__ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index a4d1759375b9..8521daba212a 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -39,113 +39,12 @@
#include "i915_sysfs.h"
#include "intel_pm.h"
-static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
+struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
struct drm_minor *minor = dev_get_drvdata(kdev);
return to_i915(minor->dev);
}
-#ifdef CONFIG_PM
-static u32 calc_residency(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
-{
- intel_wakeref_t wakeref;
- u64 res = 0;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- res = intel_rc6_residency_us(&to_gt(dev_priv)->rc6, reg);
-
- return DIV_ROUND_CLOSEST_ULL(res, 1000);
-}
-
-static ssize_t rc6_enable_show(struct device *kdev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- unsigned int mask;
-
- mask = 0;
- if (HAS_RC6(dev_priv))
- mask |= BIT(0);
- if (HAS_RC6p(dev_priv))
- mask |= BIT(1);
- if (HAS_RC6pp(dev_priv))
- mask |= BIT(2);
-
- return sysfs_emit(buf, "%x\n", mask);
-}
-
-static ssize_t rc6_residency_ms_show(struct device *kdev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
- return sysfs_emit(buf, "%u\n", rc6_residency);
-}
-
-static ssize_t rc6p_residency_ms_show(struct device *kdev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
- return sysfs_emit(buf, "%u\n", rc6p_residency);
-}
-
-static ssize_t rc6pp_residency_ms_show(struct device *kdev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
- return sysfs_emit(buf, "%u\n", rc6pp_residency);
-}
-
-static ssize_t media_rc6_residency_ms_show(struct device *kdev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
- return sysfs_emit(buf, "%u\n", rc6_residency);
-}
-
-static DEVICE_ATTR_RO(rc6_enable);
-static DEVICE_ATTR_RO(rc6_residency_ms);
-static DEVICE_ATTR_RO(rc6p_residency_ms);
-static DEVICE_ATTR_RO(rc6pp_residency_ms);
-static DEVICE_ATTR_RO(media_rc6_residency_ms);
-
-static struct attribute *rc6_attrs[] = {
- &dev_attr_rc6_enable.attr,
- &dev_attr_rc6_residency_ms.attr,
- NULL
-};
-
-static const struct attribute_group rc6_attr_group = {
- .name = power_group_name,
- .attrs = rc6_attrs
-};
-
-static struct attribute *rc6p_attrs[] = {
- &dev_attr_rc6p_residency_ms.attr,
- &dev_attr_rc6pp_residency_ms.attr,
- NULL
-};
-
-static const struct attribute_group rc6p_attr_group = {
- .name = power_group_name,
- .attrs = rc6p_attrs
-};
-
-static struct attribute *media_rc6_attrs[] = {
- &dev_attr_media_rc6_residency_ms.attr,
- NULL
-};
-
-static const struct attribute_group media_rc6_attr_group = {
- .name = power_group_name,
- .attrs = media_rc6_attrs
-};
-#endif
-
static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
{
if (!HAS_L3_DPF(i915))
@@ -257,171 +156,6 @@ static const struct bin_attribute dpf_attrs_1 = {
.private = (void *)1
};
-static ssize_t gt_act_freq_mhz_show(struct device *kdev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &to_gt(i915)->rps;
-
- return sysfs_emit(buf, "%d\n", intel_rps_read_actual_frequency(rps));
-}
-
-static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &to_gt(i915)->rps;
-
- return sysfs_emit(buf, "%d\n", intel_rps_get_requested_frequency(rps));
-}
-
-static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &to_gt(i915)->rps;
-
- return sysfs_emit(buf, "%d\n", intel_rps_get_boost_frequency(rps));
-}
-
-static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &to_gt(dev_priv)->rps;
- ssize_t ret;
- u32 val;
-
- ret = kstrtou32(buf, 0, &val);
- if (ret)
- return ret;
-
- ret = intel_rps_set_boost_frequency(rps, val);
-
- return ret ?: count;
-}
-
-static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
- struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &to_gt(dev_priv)->rps;
-
- return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->efficient_freq));
-}
-
-static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_gt *gt = to_gt(dev_priv);
- struct intel_rps *rps = &gt->rps;
-
- return sysfs_emit(buf, "%d\n", intel_rps_get_max_frequency(rps));
-}
-
-static ssize_t gt_max_freq_mhz_store(struct device *kdev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_gt *gt = to_gt(dev_priv);
- struct intel_rps *rps = &gt->rps;
- ssize_t ret;
- u32 val;
-
- ret = kstrtou32(buf, 0, &val);
- if (ret)
- return ret;
-
- ret = intel_rps_set_max_frequency(rps, val);
-
- return ret ?: count;
-}
-
-static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_gt *gt = to_gt(i915);
- struct intel_rps *rps = &gt->rps;
-
- return sysfs_emit(buf, "%d\n", intel_rps_get_min_frequency(rps));
-}
-
-static ssize_t gt_min_freq_mhz_store(struct device *kdev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &to_gt(i915)->rps;
- ssize_t ret;
- u32 val;
-
- ret = kstrtou32(buf, 0, &val);
- if (ret)
- return ret;
-
- ret = intel_rps_set_min_frequency(rps, val);
-
- return ret ?: count;
-}
-
-static DEVICE_ATTR_RO(gt_act_freq_mhz);
-static DEVICE_ATTR_RO(gt_cur_freq_mhz);
-static DEVICE_ATTR_RW(gt_boost_freq_mhz);
-static DEVICE_ATTR_RW(gt_max_freq_mhz);
-static DEVICE_ATTR_RW(gt_min_freq_mhz);
-
-static DEVICE_ATTR_RO(vlv_rpe_freq_mhz);
-
-static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
-static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
-static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
-static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
-
-/* For now we have a static number of RP states */
-static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
-{
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct intel_rps *rps = &to_gt(dev_priv)->rps;
- u32 val;
-
- if (attr == &dev_attr_gt_RP0_freq_mhz)
- val = intel_rps_get_rp0_frequency(rps);
- else if (attr == &dev_attr_gt_RP1_freq_mhz)
- val = intel_rps_get_rp1_frequency(rps);
- else if (attr == &dev_attr_gt_RPn_freq_mhz)
- val = intel_rps_get_rpn_frequency(rps);
- else
- BUG();
-
- return sysfs_emit(buf, "%d\n", val);
-}
-
-static const struct attribute * const gen6_attrs[] = {
- &dev_attr_gt_act_freq_mhz.attr,
- &dev_attr_gt_cur_freq_mhz.attr,
- &dev_attr_gt_boost_freq_mhz.attr,
- &dev_attr_gt_max_freq_mhz.attr,
- &dev_attr_gt_min_freq_mhz.attr,
- &dev_attr_gt_RP0_freq_mhz.attr,
- &dev_attr_gt_RP1_freq_mhz.attr,
- &dev_attr_gt_RPn_freq_mhz.attr,
- NULL,
-};
-
-static const struct attribute * const vlv_attrs[] = {
- &dev_attr_gt_act_freq_mhz.attr,
- &dev_attr_gt_cur_freq_mhz.attr,
- &dev_attr_gt_boost_freq_mhz.attr,
- &dev_attr_gt_max_freq_mhz.attr,
- &dev_attr_gt_min_freq_mhz.attr,
- &dev_attr_gt_RP0_freq_mhz.attr,
- &dev_attr_gt_RP1_freq_mhz.attr,
- &dev_attr_gt_RPn_freq_mhz.attr,
- &dev_attr_vlv_rpe_freq_mhz.attr,
- NULL,
-};
-
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
@@ -492,29 +226,6 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
struct device *kdev = dev_priv->drm.primary->kdev;
int ret;
-#ifdef CONFIG_PM
- if (HAS_RC6(dev_priv)) {
- ret = sysfs_merge_group(&kdev->kobj,
- &rc6_attr_group);
- if (ret)
- drm_err(&dev_priv->drm,
- "RC6 residency sysfs setup failed\n");
- }
- if (HAS_RC6p(dev_priv)) {
- ret = sysfs_merge_group(&kdev->kobj,
- &rc6p_attr_group);
- if (ret)
- drm_err(&dev_priv->drm,
- "RC6p residency sysfs setup failed\n");
- }
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- ret = sysfs_merge_group(&kdev->kobj,
- &media_rc6_attr_group);
- if (ret)
- drm_err(&dev_priv->drm,
- "Media RC6 residency sysfs setup failed\n");
- }
-#endif
if (HAS_L3_DPF(dev_priv)) {
ret = device_create_bin_file(kdev, &dpf_attrs);
if (ret)
@@ -530,13 +241,10 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
}
}
- ret = 0;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
- else if (GRAPHICS_VER(dev_priv) >= 6)
- ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
- if (ret)
- drm_err(&dev_priv->drm, "RPS sysfs setup failed\n");
+ dev_priv->sysfs_gt = kobject_create_and_add("gt", &kdev->kobj);
+ if (!dev_priv->sysfs_gt)
+ drm_warn(&dev_priv->drm,
+ "failed to register GT sysfs directory\n");
i915_setup_error_capture(kdev);
@@ -549,14 +257,6 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
i915_teardown_error_capture(kdev);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- sysfs_remove_files(&kdev->kobj, vlv_attrs);
- else
- sysfs_remove_files(&kdev->kobj, gen6_attrs);
device_remove_bin_file(kdev, &dpf_attrs_1);
device_remove_bin_file(kdev, &dpf_attrs);
-#ifdef CONFIG_PM
- sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
- sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
-#endif
}
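
The removed gt_*_freq_mhz store callbacks all follow the same sysfs store idiom: parse the input with kstrtou32(), hand the value to the RPS setter, and return either the error or the number of bytes consumed. A minimal sketch of that pattern, reusing only helpers visible in the removed code (the attribute name is illustrative):

static ssize_t freq_mhz_store(struct device *kdev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &to_gt(i915)->rps;
	ssize_t ret;
	u32 val;

	/* Reject anything that does not parse as an unsigned 32-bit value. */
	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	ret = intel_rps_set_max_frequency(rps, val);

	/* A sysfs store reports the number of bytes consumed on success. */
	return ret ?: count;
}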
diff --git a/drivers/gpu/drm/i915/i915_sysfs.h b/drivers/gpu/drm/i915/i915_sysfs.h
index 41afd4366416..243a17741e3f 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.h
+++ b/drivers/gpu/drm/i915/i915_sysfs.h
@@ -6,8 +6,11 @@
#ifndef __I915_SYSFS_H__
#define __I915_SYSFS_H__
+struct device;
struct drm_i915_private;
+struct drm_i915_private *kdev_minor_to_i915(struct device *kdev);
+
void i915_setup_sysfs(struct drm_i915_private *i915);
void i915_teardown_sysfs(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index 129f668f21ff..a5109548abc0 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -70,8 +70,10 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
min_page_size = bo->page_alignment << PAGE_SHIFT;
GEM_BUG_ON(min_page_size < mm->chunk_size);
+ GEM_BUG_ON(!IS_ALIGNED(size, min_page_size));
- if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+ if (place->fpfn + bman_res->base.num_pages != place->lpfn &&
+ place->flags & TTM_PL_FLAG_CONTIGUOUS) {
unsigned long pages;
size = roundup_pow_of_two(size);
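
The added predicate skips the power-of-two round-up when the caller pinned the placement to a window exactly the size of the allocation: if place->fpfn + num_pages == place->lpfn there is only one possible range, which is contiguous by construction. The check in isolation (a sketch, not the verbatim hunk):

	/* A fixed window the exact size of the request is already contiguous. */
	bool exact_window = place->fpfn + bman_res->base.num_pages == place->lpfn;

	if (!exact_window && (place->flags & TTM_PL_FLAG_CONTIGUOUS))
		size = roundup_pow_of_two(size);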
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index d077f7b9eaad..162e8d83691b 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -47,7 +47,7 @@ static inline void assert_vma_held_evict(const struct i915_vma *vma)
* This is the only exception to the requirement of the object lock
* being held.
*/
- if (atomic_read(&vma->vm->open))
+ if (kref_read(&vma->vm->ref))
assert_object_held_shared(vma->obj);
}
@@ -113,6 +113,7 @@ vma_create(struct drm_i915_gem_object *obj,
struct i915_vma *pos = ERR_PTR(-E2BIG);
struct i915_vma *vma;
struct rb_node *rb, **p;
+ int err;
/* The aliasing_ppgtt should never be used directly! */
GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
@@ -121,8 +122,6 @@ vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- kref_init(&vma->ref);
- vma->vm = i915_vm_get(vm);
vma->ops = &vm->vma_ops;
vma->obj = obj;
vma->size = obj->base.size;
@@ -138,6 +137,8 @@ vma_create(struct drm_i915_gem_object *obj,
}
INIT_LIST_HEAD(&vma->closed_link);
+ INIT_LIST_HEAD(&vma->obj_link);
+ RB_CLEAR_NODE(&vma->obj_node);
if (view && view->type != I915_GGTT_VIEW_NORMAL) {
vma->ggtt_view = *view;
@@ -163,8 +164,16 @@ vma_create(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
- spin_lock(&obj->vma.lock);
+ err = mutex_lock_interruptible(&vm->mutex);
+ if (err) {
+ pos = ERR_PTR(err);
+ goto err_vma;
+ }
+
+ vma->vm = vm;
+ list_add_tail(&vma->vm_link, &vm->unbound_list);
+ spin_lock(&obj->vma.lock);
if (i915_is_ggtt(vm)) {
if (unlikely(overflows_type(vma->size, u32)))
goto err_unlock;
@@ -222,13 +231,15 @@ vma_create(struct drm_i915_gem_object *obj,
list_add_tail(&vma->obj_link, &obj->vma.list);
spin_unlock(&obj->vma.lock);
+ mutex_unlock(&vm->mutex);
return vma;
err_unlock:
spin_unlock(&obj->vma.lock);
+ list_del_init(&vma->vm_link);
+ mutex_unlock(&vm->mutex);
err_vma:
- i915_vm_put(vm);
i915_vma_free(vma);
return pos;
}
@@ -279,7 +290,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
- GEM_BUG_ON(!atomic_read(&vm->open));
+ GEM_BUG_ON(!kref_read(&vm->ref));
spin_lock(&obj->vma.lock);
vma = i915_vma_lookup(obj, vm, view);
@@ -322,7 +333,6 @@ static void __vma_release(struct dma_fence_work *work)
i915_gem_object_put(vw->pinned);
i915_vm_free_pt_stash(vw->vm, &vw->stash);
- i915_vm_put(vw->vm);
if (vw->vma_res)
i915_vma_resource_put(vw->vma_res);
}
@@ -515,21 +525,18 @@ int i915_vma_bind(struct i915_vma *vma,
if (!work->vma_res->bi.pages_rsgt)
work->pinned = i915_gem_object_get(vma->obj);
} else {
- if (vma->obj) {
- ret = i915_gem_object_wait_moving_fence(vma->obj, true);
- if (ret) {
- i915_vma_resource_free(vma->resource);
- vma->resource = NULL;
+ ret = i915_gem_object_wait_moving_fence(vma->obj, true);
+ if (ret) {
+ i915_vma_resource_free(vma->resource);
+ vma->resource = NULL;
- return ret;
- }
+ return ret;
}
vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,
bind_flags);
}
- if (vma->obj)
- set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
+ set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
atomic_or(bind_flags, &vma->flags);
return 0;
@@ -841,7 +848,7 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
- list_add_tail(&vma->vm_link, &vma->vm->bound_list);
+ list_move_tail(&vma->vm_link, &vma->vm->bound_list);
return 0;
}
@@ -857,7 +864,7 @@ i915_vma_detach(struct i915_vma *vma)
* vma, we can drop its hold on the backing storage and allow
* it to be reaped by the shrinker.
*/
- list_del(&vma->vm_link);
+ list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
}
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
@@ -1357,18 +1364,10 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (err)
return err;
- if (vma->obj) {
- err = i915_gem_object_get_moving_fence(vma->obj, &moving);
- if (err)
- return err;
- } else {
- moving = NULL;
- }
-
if (flags & PIN_GLOBAL)
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
- if (flags & vma->vm->bind_async_flags || moving) {
+ if (flags & vma->vm->bind_async_flags) {
/* lock VM */
err = i915_vm_lock_objects(vma->vm, ww);
if (err)
@@ -1380,7 +1379,11 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
goto err_rpm;
}
- work->vm = i915_vm_get(vma->vm);
+ work->vm = vma->vm;
+
+ err = i915_gem_object_get_moving_fence(vma->obj, &moving);
+ if (err)
+ goto err_rpm;
dma_fence_work_chain(&work->base, moving);
@@ -1625,16 +1628,6 @@ void i915_vma_reopen(struct i915_vma *vma)
__i915_vma_remove_closed(vma);
}
-void i915_vma_release(struct kref *ref)
-{
- struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
-
- i915_vm_put(vma->vm);
- i915_active_fini(&vma->active);
- GEM_WARN_ON(vma->resource);
- i915_vma_free(vma);
-}
-
static void force_unbind(struct i915_vma *vma)
{
if (!drm_mm_node_allocated(&vma->node))
@@ -1645,7 +1638,7 @@ static void force_unbind(struct i915_vma *vma)
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
}
-static void release_references(struct i915_vma *vma)
+static void release_references(struct i915_vma *vma, bool vm_ddestroy)
{
struct drm_i915_gem_object *obj = vma->obj;
@@ -1655,11 +1648,17 @@ static void release_references(struct i915_vma *vma)
list_del(&vma->obj_link);
if (!RB_EMPTY_NODE(&vma->obj_node))
rb_erase(&vma->obj_node, &obj->vma.tree);
+
spin_unlock(&obj->vma.lock);
__i915_vma_remove_closed(vma);
- __i915_vma_put(vma);
+ if (vm_ddestroy)
+ i915_vm_resv_put(vma->vm);
+
+ i915_active_fini(&vma->active);
+ GEM_WARN_ON(vma->resource);
+ i915_vma_free(vma);
}
/**
@@ -1674,8 +1673,12 @@ static void release_references(struct i915_vma *vma)
* - __i915_gem_object_pages_fini()
* - __i915_vm_close() - Blocks the above function by taking a reference on
* the object.
- * - __i915_vma_parked() - Blocks the above functions by taking an open-count on
- * the vm and a reference on the object.
+ * - __i915_vma_parked() - Blocks the above functions by taking a reference
+ * on the vm and a reference on the object. Also takes the object lock so
+ * destruction from __i915_vma_parked() can be blocked by holding the
+ * object lock. Since the object lock may only be taken from within i915
+ * while holding an object refcount, holding the object lock also
+ * implicitly blocks the vma freeing from __i915_gem_object_pages_fini().
*
* Because of locks taken during destruction, a vma is also guaranteed to
* stay alive while the following locks are held if it was looked up while
@@ -1683,24 +1686,27 @@ static void release_references(struct i915_vma *vma)
* - vm->mutex
* - obj->vma.lock
* - gt->closed_lock
- *
- * A vma user can also temporarily keep the vma alive while holding a vma
- * reference.
*/
void i915_vma_destroy_locked(struct i915_vma *vma)
{
lockdep_assert_held(&vma->vm->mutex);
force_unbind(vma);
- release_references(vma);
+ list_del_init(&vma->vm_link);
+ release_references(vma, false);
}
void i915_vma_destroy(struct i915_vma *vma)
{
+ bool vm_ddestroy;
+
mutex_lock(&vma->vm->mutex);
force_unbind(vma);
+ list_del_init(&vma->vm_link);
+ vm_ddestroy = vma->vm_ddestroy;
+ vma->vm_ddestroy = false;
mutex_unlock(&vma->vm->mutex);
- release_references(vma);
+ release_references(vma, vm_ddestroy);
}
void i915_vma_parked(struct intel_gt *gt)
@@ -1718,7 +1724,7 @@ void i915_vma_parked(struct intel_gt *gt)
if (!kref_get_unless_zero(&obj->base.refcount))
continue;
- if (!i915_vm_tryopen(vm)) {
+ if (!i915_vm_tryget(vm)) {
i915_gem_object_put(obj);
continue;
}
@@ -1744,7 +1750,7 @@ void i915_vma_parked(struct intel_gt *gt)
}
i915_gem_object_put(obj);
- i915_vm_close(vm);
+ i915_vm_put(vm);
}
}
@@ -1903,7 +1909,9 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
/* If vm is not open, unbind is a nop. */
vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
- atomic_read(&vma->vm->open);
+ kref_read(&vma->vm->ref);
+ vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
+ vma->vm->skip_pte_rewrite;
trace_i915_vma_unbind(vma);
unbind_fence = i915_vma_resource_unbind(vma_res);
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 67ae7341c7e0..6034991d89fe 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -222,20 +222,6 @@ void i915_vma_unlink_ctx(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
void i915_vma_reopen(struct i915_vma *vma);
-static inline struct i915_vma *__i915_vma_get(struct i915_vma *vma)
-{
- if (kref_get_unless_zero(&vma->ref))
- return vma;
-
- return NULL;
-}
-
-void i915_vma_release(struct kref *ref);
-static inline void __i915_vma_put(struct i915_vma *vma)
-{
- kref_put(&vma->ref, i915_vma_release);
-}
-
void i915_vma_destroy_locked(struct i915_vma *vma);
void i915_vma_destroy(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.c b/drivers/gpu/drm/i915/i915_vma_resource.c
index 57ae92ba8af1..27c55027387a 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.c
+++ b/drivers/gpu/drm/i915/i915_vma_resource.c
@@ -178,7 +178,7 @@ static void i915_vma_resource_unbind_work(struct work_struct *work)
bool lockdep_cookie;
lockdep_cookie = dma_fence_begin_signalling();
- if (likely(atomic_read(&vm->open)))
+ if (likely(!vma_res->skip_pte_rewrite))
vma_res->ops->unbind_vma(vm, vma_res);
dma_fence_end_signalling(lockdep_cookie);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.h b/drivers/gpu/drm/i915/i915_vma_resource.h
index 25913913baa6..5d8427caa2ba 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.h
+++ b/drivers/gpu/drm/i915/i915_vma_resource.h
@@ -62,6 +62,11 @@ struct i915_page_sizes {
* deferred to a work item awaiting unsignaled fences. This is a hack.
* (dma_fence_work uses a fence flag for this, but this seems slightly
* cleaner).
+ * @needs_wakeref: Whether a wakeref is needed during unbind. Since we can't
+ * take a wakeref in the dma-fence signalling critical path, it needs to be
+ * taken when the unbind is scheduled.
+ * @skip_pte_rewrite: During ggtt suspend and vm takedown, pte rewriting
+ * needs to be skipped for unbind.
*
* The lifetime of a struct i915_vma_resource is from a binding request to
* the actual possible asynchronous unbind has completed.
@@ -113,6 +118,7 @@ struct i915_vma_resource {
bool allocated:1;
bool immediate_unbind:1;
bool needs_wakeref:1;
+ bool skip_pte_rewrite:1;
};
bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 88370dadca82..be6e028c3b57 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -211,7 +211,6 @@ struct i915_vma {
* handles (but same file) for execbuf, i.e. the number of aliases
* that exist in the ctx->handle_vmas LUT for this vma.
*/
- struct kref ref;
atomic_t open_count;
atomic_t flags;
/**
@@ -272,6 +271,13 @@ struct i915_vma {
atomic_t pages_count; /* number of active binds to the pages */
/**
+ * Whether we hold a reference on the vm dma_resv lock to temporarily
+ * block vm freeing until the vma is destroyed.
+ * Protected by the vm mutex.
+ */
+ bool vm_ddestroy;
+
+ /**
* Support different GGTT views into the same object.
* This means there can be multiple VMA mappings per object and per VM.
* i915_ggtt_view_type is used to distinguish between those entries.
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index f9b955810593..576d15a04c9e 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -141,6 +141,8 @@ enum intel_ppgtt_type {
func(has_flat_ccs); \
func(has_global_mocs); \
func(has_gt_uc); \
+ func(has_heci_pxp); \
+ func(has_heci_gscfi); \
func(has_guc_deprivilege); \
func(has_l3_dpf); \
func(has_llc); \
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index ded78b83e0b5..e38d2db1c3e3 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -19,7 +19,7 @@ static const struct {
.class = INTEL_MEMORY_SYSTEM,
.instance = 0,
},
- [INTEL_REGION_LMEM] = {
+ [INTEL_REGION_LMEM_0] = {
.class = INTEL_MEMORY_LOCAL,
.instance = 0,
},
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index bbc35ec5c090..3d8378c1b447 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -29,14 +29,17 @@ enum intel_memory_type {
enum intel_region_id {
INTEL_REGION_SMEM = 0,
- INTEL_REGION_LMEM,
+ INTEL_REGION_LMEM_0,
+ INTEL_REGION_LMEM_1,
+ INTEL_REGION_LMEM_2,
+ INTEL_REGION_LMEM_3,
INTEL_REGION_STOLEN_SMEM,
INTEL_REGION_STOLEN_LMEM,
INTEL_REGION_UNKNOWN, /* Should be last */
};
#define REGION_SMEM BIT(INTEL_REGION_SMEM)
-#define REGION_LMEM BIT(INTEL_REGION_LMEM)
+#define REGION_LMEM BIT(INTEL_REGION_LMEM_0)
#define REGION_STOLEN_SMEM BIT(INTEL_REGION_STOLEN_SMEM)
#define REGION_STOLEN_LMEM BIT(INTEL_REGION_STOLEN_LMEM)
@@ -54,6 +57,7 @@ struct intel_memory_region_ops {
int (*init_object)(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
+ resource_size_t offset,
resource_size_t size,
resource_size_t page_size,
unsigned int flags);
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index 737ef3f4ab54..62ff77445b01 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -12,6 +12,7 @@
#include "intel_region_ttm.h"
+#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
/**
* DOC: TTM support structure
@@ -191,6 +192,7 @@ intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
*/
struct ttm_resource *
intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
+ resource_size_t offset,
resource_size_t size,
unsigned int flags)
{
@@ -202,7 +204,10 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
if (flags & I915_BO_ALLOC_CONTIGUOUS)
place.flags |= TTM_PL_FLAG_CONTIGUOUS;
- if (mem->io_size && mem->io_size < mem->total) {
+ if (offset != I915_BO_INVALID_OFFSET) {
+ place.fpfn = offset >> PAGE_SHIFT;
+ place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
+ } else if (mem->io_size && mem->io_size < mem->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place.flags |= TTM_PL_FLAG_TOPDOWN;
} else {
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.h b/drivers/gpu/drm/i915/intel_region_ttm.h
index fdee5e7bd46c..cf9d86dcf409 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.h
+++ b/drivers/gpu/drm/i915/intel_region_ttm.h
@@ -36,6 +36,7 @@ struct ttm_device_funcs *i915_ttm_driver(void);
#ifdef CONFIG_DRM_I915_SELFTEST
struct ttm_resource *
intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
+ resource_size_t offset,
resource_size_t size,
unsigned int flags);
#endif
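
The new offset parameter turns a byte offset into a TTM placement window; fpfn and lpfn are page frame numbers and lpfn is exclusive. A hedged sketch of the conversion, assuming offset and size are page-aligned (the helper name is illustrative):

static void ttm_place_from_offset(struct ttm_place *place,
				  resource_size_t offset,
				  resource_size_t size)
{
	place->fpfn = offset >> PAGE_SHIFT;		   /* first page frame */
	place->lpfn = place->fpfn + (size >> PAGE_SHIFT);  /* one past the last */
}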
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 24cd3c3729ca..83517a703eb6 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -2050,14 +2050,11 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-int intel_uncore_setup_mmio(struct intel_uncore *uncore)
+int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
{
struct drm_i915_private *i915 = uncore->i915;
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
- int mmio_bar;
int mmio_size;
- mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0;
/*
* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
@@ -2074,7 +2071,7 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore)
else
mmio_size = 2 * 1024 * 1024;
- uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
+ uncore->regs = ioremap(phys_addr, mmio_size);
if (uncore->regs == NULL) {
drm_err(&i915->drm, "failed to map registers\n");
return -EIO;
@@ -2085,9 +2082,7 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore)
void intel_uncore_cleanup_mmio(struct intel_uncore *uncore)
{
- struct pci_dev *pdev = to_pci_dev(uncore->i915->drm.dev);
-
- pci_iounmap(pdev, uncore->regs);
+ iounmap(uncore->regs);
}
void intel_uncore_init_early(struct intel_uncore *uncore,
@@ -2475,17 +2470,46 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
return fw_domains;
}
-u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
- i915_reg_t reg,
- int slice, int subslice)
+/**
+ * uncore_rw_with_mcr_steering_fw - Access a register after programming
+ * the MCR selector register.
+ * @uncore: pointer to struct intel_uncore
+ * @reg: register being accessed
+ * @rw_flag: FW_REG_READ for read access or FW_REG_WRITE for write access
+ * @slice: slice number (ignored for multi-cast write)
+ * @subslice: sub-slice number (ignored for multi-cast write)
+ * @value: register value to be written (ignored for read)
+ *
+ * Return: 0 for write access, or the register value for read access.
+ *
+ * Caller needs to make sure the relevant forcewake wells are up.
+ */
+static u32 uncore_rw_with_mcr_steering_fw(struct intel_uncore *uncore,
+ i915_reg_t reg, u8 rw_flag,
+ int slice, int subslice, u32 value)
{
- u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
+ u32 mcr_mask, mcr_ss, mcr, old_mcr, val = 0;
lockdep_assert_held(&uncore->lock);
if (GRAPHICS_VER(uncore->i915) >= 11) {
mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
+
+ /*
+ * Wa_22013088509
+ *
+ * The setting of the multicast/unicast bit usually wouldn't
+ * matter for read operations (which always return the value
+ * from a single register instance regardless of how that bit
+ * is set), but some platforms have a workaround requiring us
+ * to remain in multicast mode for reads. There's no real
+ * downside to this, so we'll just go ahead and do so on all
+ * platforms; we'll only clear the multicast bit from the mask
+ * when explicitly doing a write operation.
+ */
+ if (rw_flag == FW_REG_WRITE)
+ mcr_mask |= GEN11_MCR_MULTICAST;
} else {
mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
@@ -2497,7 +2521,10 @@ u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
mcr |= mcr_ss;
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
- val = intel_uncore_read_fw(uncore, reg);
+ if (rw_flag == FW_REG_READ)
+ val = intel_uncore_read_fw(uncore, reg);
+ else
+ intel_uncore_write_fw(uncore, reg, value);
mcr &= ~mcr_mask;
mcr |= old_mcr & mcr_mask;
@@ -2507,14 +2534,16 @@ u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
return val;
}
-u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
- i915_reg_t reg, int slice, int subslice)
+static u32 uncore_rw_with_mcr_steering(struct intel_uncore *uncore,
+ i915_reg_t reg, u8 rw_flag,
+ int slice, int subslice,
+ u32 value)
{
enum forcewake_domains fw_domains;
u32 val;
fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
- FW_REG_READ);
+ rw_flag);
fw_domains |= intel_uncore_forcewake_for_reg(uncore,
GEN8_MCR_SELECTOR,
FW_REG_READ | FW_REG_WRITE);
@@ -2522,7 +2551,8 @@ u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
spin_lock_irq(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw_domains);
- val = intel_uncore_read_with_mcr_steering_fw(uncore, reg, slice, subslice);
+ val = uncore_rw_with_mcr_steering_fw(uncore, reg, rw_flag,
+ slice, subslice, value);
intel_uncore_forcewake_put__locked(uncore, fw_domains);
spin_unlock_irq(&uncore->lock);
@@ -2530,6 +2560,28 @@ u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
return val;
}
+u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
+ i915_reg_t reg, int slice, int subslice)
+{
+ return uncore_rw_with_mcr_steering_fw(uncore, reg, FW_REG_READ,
+ slice, subslice, 0);
+}
+
+u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
+ i915_reg_t reg, int slice, int subslice)
+{
+ return uncore_rw_with_mcr_steering(uncore, reg, FW_REG_READ,
+ slice, subslice, 0);
+}
+
+void intel_uncore_write_with_mcr_steering(struct intel_uncore *uncore,
+ i915_reg_t reg, u32 value,
+ int slice, int subslice)
+{
+ uncore_rw_with_mcr_steering(uncore, reg, FW_REG_WRITE,
+ slice, subslice, value);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 6ff56d673e2b..52fe3d89dd2b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -29,6 +29,7 @@
#include <linux/notifier.h>
#include <linux/hrtimer.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/types.h>
#include "i915_reg_defs.h"
@@ -214,12 +215,14 @@ u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
int slice, int subslice);
u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
i915_reg_t reg, int slice, int subslice);
-
+void intel_uncore_write_with_mcr_steering(struct intel_uncore *uncore,
+ i915_reg_t reg, u32 value,
+ int slice, int subslice);
void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
void intel_uncore_init_early(struct intel_uncore *uncore,
struct intel_gt *gt);
-int intel_uncore_setup_mmio(struct intel_uncore *uncore);
+int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index ab751192eb3b..8633bec18fa7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1112,10 +1112,16 @@ static int misaligned_case(struct i915_address_space *vm, struct intel_memory_re
expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1));
expected_node_size = expected_vma_size;
- if (NEEDS_COMPACT_PT(vm->i915) && i915_gem_object_is_lmem(obj)) {
- /* compact-pt should expand lmem node to 2MB */
+ if (HAS_64K_PAGES(vm->i915) && i915_gem_object_is_lmem(obj)) {
+ /*
+ * The compact-pt should expand the lmem node to 2MB for the ppGTT;
+ * in all other cases we should only expect 64K.
+ */
expected_vma_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
- expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_2M);
+ if (NEEDS_COMPACT_PT(vm->i915) && !i915_is_ggtt(vm))
+ expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_2M);
+ else
+ expected_node_size = round_up(size, I915_GTT_PAGE_SIZE_64K);
}
if (vma->size != expected_vma_size || vma->node.size != expected_node_size) {
@@ -1150,7 +1156,7 @@ static int misaligned_pin(struct i915_address_space *vm,
flags |= PIN_GLOBAL;
for_each_memory_region(mr, vm->i915, id) {
- u64 min_alignment = i915_vm_min_alignment(vm, (enum intel_memory_type)id);
+ u64 min_alignment = i915_vm_min_alignment(vm, mr->type);
u64 size = min_alignment;
u64 addr = round_down(hole_start + (hole_size / 2), min_alignment);
@@ -1205,7 +1211,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
goto out_free;
}
GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
- GEM_BUG_ON(!atomic_read(&ppgtt->vm.open));
+ assert_vm_alive(&ppgtt->vm);
err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
@@ -1438,7 +1444,7 @@ static void track_vma_bind(struct i915_vma *vma)
vma->resource->bi.pages = vma->pages;
mutex_lock(&vma->vm->mutex);
- list_add_tail(&vma->vm_link, &vma->vm->bound_list);
+ list_move_tail(&vma->vm_link, &vma->vm->bound_list);
mutex_unlock(&vma->vm->mutex);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 573d9b2e1a4a..9c31a16f8380 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -73,7 +73,7 @@ static void mock_device_release(struct drm_device *dev)
destroy_workqueue(i915->wq);
intel_region_ttm_device_fini(i915);
- intel_gt_driver_late_release(to_gt(i915));
+ intel_gt_driver_late_release_all(i915);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
@@ -112,6 +112,11 @@ static struct dev_pm_domain pm_domain = {
},
};
+static void mock_gt_probe(struct drm_i915_private *i915)
+{
+ i915->gt[0] = &i915->gt0;
+}
+
struct drm_i915_private *mock_gem_device(void)
{
#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
@@ -180,11 +185,11 @@ struct drm_i915_private *mock_gem_device(void)
spin_lock_init(&i915->gpu_error.lock);
i915_gem_init__mm(i915);
- intel_gt_init_early(to_gt(i915), i915);
- __intel_gt_init_early(to_gt(i915), i915);
+ intel_root_gt_init_early(i915);
mock_uncore_init(&i915->uncore, i915);
atomic_inc(&to_gt(i915)->wakeref.count); /* disable; no hw support */
to_gt(i915)->awake = -ENODEV;
+ mock_gt_probe(i915);
ret = intel_region_ttm_device_init(i915);
if (ret)
@@ -229,7 +234,7 @@ err_unlock:
err_drv:
intel_region_ttm_device_fini(i915);
err_ttm:
- intel_gt_driver_late_release(to_gt(i915));
+ intel_gt_driver_late_release_all(i915);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
mock_destroy_device(i915);
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index f64325491f35..670557ce1024 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -26,6 +26,7 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj)
int err;
obj->mm.res = intel_region_ttm_resource_alloc(obj->mm.region,
+ obj->bo_offset,
obj->base.size,
obj->flags);
if (IS_ERR(obj->mm.res))
@@ -57,6 +58,7 @@ static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
static int mock_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
+ resource_size_t offset,
resource_size_t size,
resource_size_t page_size,
unsigned int flags)
@@ -70,6 +72,8 @@ static int mock_object_init(struct intel_memory_region *mem,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class, flags);
+ obj->bo_offset = offset;
+
obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 83c31b2ad865..ccc4fcf7a630 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1742,7 +1742,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu)
return ERR_CAST(mmu);
return msm_gem_address_space_create(mmu,
- "gpu", 0x100000000ULL, 0x1ffffffffULL);
+ "gpu", 0x100000000ULL, SZ_4G);
}
static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
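
Replacing 0x1ffffffffULL with SZ_4G makes the units explicit: the last argument is the size of the VA range, not its inclusive end (the old constant was the end address of a 4 GiB window starting at 0x100000000). The corrected call shape, with the signature inferred from this usage:

	/* Private per-context GPU VA: a 4 GiB window starting at 1 << 32. */
	return msm_gem_address_space_create(mmu, "gpu",
					    0x100000000ULL,	/* VA base */
					    SZ_4G);		/* VA size */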
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 89cfd84760d7..8706bcdd1472 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -599,43 +599,91 @@ static const struct of_device_id dt_match[] = {
{}
};
-#ifdef CONFIG_PM
-static int adreno_resume(struct device *dev)
+static int adreno_runtime_resume(struct device *dev)
{
struct msm_gpu *gpu = dev_to_gpu(dev);
return gpu->funcs->pm_resume(gpu);
}
-static int active_submits(struct msm_gpu *gpu)
+static int adreno_runtime_suspend(struct device *dev)
{
- int active_submits;
- mutex_lock(&gpu->active_lock);
- active_submits = gpu->active_submits;
- mutex_unlock(&gpu->active_lock);
- return active_submits;
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+
+ /*
+ * We should be holding a runpm ref, which will prevent
+ * runtime suspend. In the system suspend path, we've
+ * already waited for active jobs to complete.
+ */
+ WARN_ON_ONCE(gpu->active_submits);
+
+ return gpu->funcs->pm_suspend(gpu);
+}
+
+static void suspend_scheduler(struct msm_gpu *gpu)
+{
+ int i;
+
+ /*
+ * Shut down the scheduler before we force suspend, so that
+ * suspend isn't racing with scheduler kthread feeding us
+ * more work.
+ *
+ * Note, we just want to park the thread, and let any jobs
+ * that are already on the hw queue complete normally, as
+ * opposed to the drm_sched_stop() path used for handling
+ * faulting/timed-out jobs. We can't really cancel any jobs
+ * already on the hw queue without racing with the GPU.
+ */
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+ kthread_park(sched->thread);
+ }
}
-static int adreno_suspend(struct device *dev)
+static void resume_scheduler(struct msm_gpu *gpu)
+{
+ int i;
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+ kthread_unpark(sched->thread);
+ }
+}
+
+static int adreno_system_suspend(struct device *dev)
{
struct msm_gpu *gpu = dev_to_gpu(dev);
- int remaining;
+ int remaining, ret;
+
+ suspend_scheduler(gpu);
remaining = wait_event_timeout(gpu->retire_event,
- active_submits(gpu) == 0,
+ gpu->active_submits == 0,
msecs_to_jiffies(1000));
if (remaining == 0) {
dev_err(dev, "Timeout waiting for GPU to suspend\n");
- return -EBUSY;
+ ret = -EBUSY;
+ goto out;
}
- return gpu->funcs->pm_suspend(gpu);
+ ret = pm_runtime_force_suspend(dev);
+out:
+ if (ret)
+ resume_scheduler(gpu);
+
+ return ret;
+}
+
+static int adreno_system_resume(struct device *dev)
+{
+ resume_scheduler(dev_to_gpu(dev));
+ return pm_runtime_force_resume(dev);
}
-#endif
static const struct dev_pm_ops adreno_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
+ RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
};
static struct platform_driver adreno_driver = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index c515b7cf922c..c61b5b283f08 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -54,87 +54,87 @@ struct dpu_intr_reg {
* When making changes be sure to sync with dpu_hw_intr_reg
*/
static const struct dpu_intr_reg dpu_intr_set[] = {
- {
+ [MDP_SSPP_TOP0_INTR] = {
MDP_SSPP_TOP0_OFF+INTR_CLEAR,
MDP_SSPP_TOP0_OFF+INTR_EN,
MDP_SSPP_TOP0_OFF+INTR_STATUS
},
- {
+ [MDP_SSPP_TOP0_INTR2] = {
MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
MDP_SSPP_TOP0_OFF+INTR2_EN,
MDP_SSPP_TOP0_OFF+INTR2_STATUS
},
- {
+ [MDP_SSPP_TOP0_HIST_INTR] = {
MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
},
- {
+ [MDP_INTF0_INTR] = {
MDP_INTF_0_OFF+INTF_INTR_CLEAR,
MDP_INTF_0_OFF+INTF_INTR_EN,
MDP_INTF_0_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_INTF1_INTR] = {
MDP_INTF_1_OFF+INTF_INTR_CLEAR,
MDP_INTF_1_OFF+INTF_INTR_EN,
MDP_INTF_1_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_INTF2_INTR] = {
MDP_INTF_2_OFF+INTF_INTR_CLEAR,
MDP_INTF_2_OFF+INTF_INTR_EN,
MDP_INTF_2_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_INTF3_INTR] = {
MDP_INTF_3_OFF+INTF_INTR_CLEAR,
MDP_INTF_3_OFF+INTF_INTR_EN,
MDP_INTF_3_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_INTF4_INTR] = {
MDP_INTF_4_OFF+INTF_INTR_CLEAR,
MDP_INTF_4_OFF+INTF_INTR_EN,
MDP_INTF_4_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_INTF5_INTR] = {
MDP_INTF_5_OFF+INTF_INTR_CLEAR,
MDP_INTF_5_OFF+INTF_INTR_EN,
MDP_INTF_5_OFF+INTF_INTR_STATUS
},
- {
+ [MDP_AD4_0_INTR] = {
MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
},
- {
+ [MDP_AD4_1_INTR] = {
MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
},
- {
+ [MDP_INTF0_7xxx_INTR] = {
MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
},
- {
+ [MDP_INTF1_7xxx_INTR] = {
MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
},
- {
+ [MDP_INTF2_7xxx_INTR] = {
MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
},
- {
+ [MDP_INTF3_7xxx_INTR] = {
MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
},
- {
+ [MDP_INTF4_7xxx_INTR] = {
MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
},
- {
+ [MDP_INTF5_7xxx_INTR] = {
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 1ee824600995..c478d25f7825 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -98,7 +98,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
__drm_atomic_helper_plane_destroy_state(plane->state);
kfree(to_mdp5_plane_state(plane->state));
+ plane->state = NULL;
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+ if (!mdp5_state)
+ return;
__drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
}
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
index 5d2ff6791058..acfe1b31e079 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -176,6 +176,8 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
va_list va;
new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
+ if (!new_blk)
+ return;
va_start(va, fmt);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 178b774a5fbd..a42732b67349 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -580,6 +580,12 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
dp->dp_display.connector_type, state);
mutex_unlock(&dp->event_mutex);
+ /*
+ * Add the fail-safe mode outside the event_mutex scope
+ * to avoid a potential circular lock with the drm thread.
+ */
+ dp_panel_add_fail_safe_mode(dp->dp_display.connector);
+
/* uevent will complete connection part */
return 0;
};
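
The hotplug path now releases event_mutex before dp_panel_add_fail_safe_mode() takes mode_config.mutex, avoiding an ABBA lock ordering against the drm thread. The safe shape, condensed from the hunk above:

	mutex_lock(&dp->event_mutex);
	/* ... hpd bookkeeping strictly under event_mutex ... */
	mutex_unlock(&dp->event_mutex);

	/* mode_config.mutex is only taken once event_mutex is released. */
	dp_panel_add_fail_safe_mode(dp->dp_display.connector);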
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index f1418722c549..26c3653c99ec 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -151,6 +151,15 @@ static int dp_panel_update_modes(struct drm_connector *connector,
return rc;
}
+void dp_panel_add_fail_safe_mode(struct drm_connector *connector)
+{
+ /* fail safe edid */
+ mutex_lock(&connector->dev->mode_config.mutex);
+ if (drm_add_modes_noedid(connector, 640, 480))
+ drm_set_preferred_mode(connector, 640, 480);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector)
{
@@ -207,16 +216,7 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
goto end;
}
- /* fail safe edid */
- mutex_lock(&connector->dev->mode_config.mutex);
- if (drm_add_modes_noedid(connector, 640, 480))
- drm_set_preferred_mode(connector, 640, 480);
- mutex_unlock(&connector->dev->mode_config.mutex);
- } else {
- /* always add fail-safe mode as backup mode */
- mutex_lock(&connector->dev->mode_config.mutex);
- drm_add_modes_noedid(connector, 640, 480);
- mutex_unlock(&connector->dev->mode_config.mutex);
+ dp_panel_add_fail_safe_mode(connector);
}
if (panel->aux_cfg_update_done) {
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 9023e5bb4b8b..99739ea679a7 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -59,6 +59,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel);
int dp_panel_deinit(struct dp_panel *dp_panel);
int dp_panel_timing_cfg(struct dp_panel *dp_panel);
void dp_panel_dump_regs(struct dp_panel *dp_panel);
+void dp_panel_add_fail_safe_mode(struct drm_connector *connector);
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector);
u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 0c1b7dde377c..9f6af0f0fe00 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -638,7 +638,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
return connector;
fail:
- connector->funcs->destroy(msm_dsi->connector);
+ connector->funcs->destroy(connector);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 01bbb5f2d462..8f492656c9ad 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -927,6 +927,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
get_pid_task(aspace->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
+ put_task_struct(task);
} else {
comm = NULL;
}
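
The msm_gem fix balances the task reference taken by get_pid_task(): the task is needed only long enough to copy ->comm, after which put_task_struct() drops it. The balanced pattern in isolation:

	struct task_struct *task;
	char *comm = NULL;

	task = get_pid_task(aspace->pid, PIDTYPE_PID);	/* takes a task ref */
	if (task) {
		comm = kstrdup(task->comm, GFP_KERNEL);	/* copy while held */
		put_task_struct(task);			/* drop immediately */
	}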
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 46029c5610c8..145047e19394 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -229,7 +229,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
if (ret)
- dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
+ dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
}
static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
@@ -265,7 +265,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
return 0;
}
-static int rpi_touchscreen_enable(struct drm_panel *panel)
+static int rpi_touchscreen_prepare(struct drm_panel *panel)
{
struct rpi_touchscreen *ts = panel_to_ts(panel);
int i;
@@ -295,6 +295,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
msleep(100);
+ return 0;
+}
+
+static int rpi_touchscreen_enable(struct drm_panel *panel)
+{
+ struct rpi_touchscreen *ts = panel_to_ts(panel);
+
/* Turn on the backlight. */
rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
@@ -349,7 +356,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel,
static const struct drm_panel_funcs rpi_touchscreen_funcs = {
.disable = rpi_touchscreen_disable,
.unprepare = rpi_touchscreen_noop,
- .prepare = rpi_touchscreen_noop,
+ .prepare = rpi_touchscreen_prepare,
.enable = rpi_touchscreen_enable,
.get_modes = rpi_touchscreen_get_modes,
};
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 11c97edde54d..1045d2c46a76 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -39,12 +39,12 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
- r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
+ r200.o radeon_legacy_tv.o r600_cs.o \
radeon_pm.o atombios_dp.o r600_hdmi.o dce3_1_afmt.o \
- evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
- evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
+ evergreen.o evergreen_cs.o \
+ evergreen_hdmi.o radeon_trace_points.o ni.o \
atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
- si_blit_shaders.o radeon_prime.o cik.o cik_blit_shaders.o \
+ radeon_prime.o cik.o cik_blit_shaders.o \
r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
deleted file mode 100644
index 9fec4d09f383..000000000000
--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.c
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- * Copyright 2010 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Alex Deucher <alexander.deucher@amd.com>
- */
-
-#include <linux/bug.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-/*
- * evergreen cards need to use the 3D engine to blit data which requires
- * quite a bit of hw state setup. Rather than pull the whole 3D driver
- * (which normally generates the 3D state) into the DRM, we opt to use
- * statically generated state tables. The register state and shaders
- * were hand generated to support blitting functionality. See the 3D
- * driver or documentation for descriptions of the registers and
- * shader instructions.
- */
-
-const u32 cayman_default_state[] =
-{
- 0xc0066900,
- 0x00000000,
- 0x00000060, /* DB_RENDER_CONTROL */
- 0x00000000, /* DB_COUNT_CONTROL */
- 0x00000000, /* DB_DEPTH_VIEW */
- 0x0000002a, /* DB_RENDER_OVERRIDE */
- 0x00000000, /* DB_RENDER_OVERRIDE2 */
- 0x00000000, /* DB_HTILE_DATA_BASE */
-
- 0xc0026900,
- 0x0000000a,
- 0x00000000, /* DB_STENCIL_CLEAR */
- 0x00000000, /* DB_DEPTH_CLEAR */
-
- 0xc0036900,
- 0x0000000f,
- 0x00000000, /* DB_DEPTH_INFO */
- 0x00000000, /* DB_Z_INFO */
- 0x00000000, /* DB_STENCIL_INFO */
-
- 0xc0016900,
- 0x00000080,
- 0x00000000, /* PA_SC_WINDOW_OFFSET */
-
- 0xc00d6900,
- 0x00000083,
- 0x0000ffff, /* PA_SC_CLIPRECT_RULE */
- 0x00000000, /* PA_SC_CLIPRECT_0_TL */
- 0x20002000, /* PA_SC_CLIPRECT_0_BR */
- 0x00000000,
- 0x20002000,
- 0x00000000,
- 0x20002000,
- 0x00000000,
- 0x20002000,
- 0xaaaaaaaa, /* PA_SC_EDGERULE */
- 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
- 0x0000000f, /* CB_TARGET_MASK */
- 0x0000000f, /* CB_SHADER_MASK */
-
- 0xc0226900,
- 0x00000094,
- 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
- 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
- 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
-
- 0xc0016900,
- 0x000000d4,
- 0x00000000, /* SX_MISC */
-
- 0xc0026900,
- 0x000000d9,
- 0x00000000, /* CP_RINGID */
- 0x00000000, /* CP_VMID */
-
- 0xc0096900,
- 0x00000100,
- 0x00ffffff, /* VGT_MAX_VTX_INDX */
- 0x00000000, /* VGT_MIN_VTX_INDX */
- 0x00000000, /* VGT_INDX_OFFSET */
- 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
- 0x00000000, /* SX_ALPHA_TEST_CONTROL */
- 0x00000000, /* CB_BLEND_RED */
- 0x00000000, /* CB_BLEND_GREEN */
- 0x00000000, /* CB_BLEND_BLUE */
- 0x00000000, /* CB_BLEND_ALPHA */
-
- 0xc0016900,
- 0x00000187,
- 0x00000100, /* SPI_VS_OUT_ID_0 */
-
- 0xc0026900,
- 0x00000191,
- 0x00000100, /* SPI_PS_INPUT_CNTL_0 */
- 0x00000101, /* SPI_PS_INPUT_CNTL_1 */
-
- 0xc0016900,
- 0x000001b1,
- 0x00000000, /* SPI_VS_OUT_CONFIG */
-
- 0xc0106900,
- 0x000001b3,
- 0x20000001, /* SPI_PS_IN_CONTROL_0 */
- 0x00000000, /* SPI_PS_IN_CONTROL_1 */
- 0x00000000, /* SPI_INTERP_CONTROL_0 */
- 0x00000000, /* SPI_INPUT_Z */
- 0x00000000, /* SPI_FOG_CNTL */
- 0x00100000, /* SPI_BARYC_CNTL */
- 0x00000000, /* SPI_PS_IN_CONTROL_2 */
- 0x00000000, /* SPI_COMPUTE_INPUT_CNTL */
- 0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */
- 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */
- 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */
- 0x00000000, /* SPI_GPR_MGMT */
- 0x00000000, /* SPI_LDS_MGMT */
- 0x00000000, /* SPI_STACK_MGMT */
- 0x00000000, /* SPI_WAVE_MGMT_1 */
- 0x00000000, /* SPI_WAVE_MGMT_2 */
-
- 0xc0016900,
- 0x000001e0,
- 0x00000000, /* CB_BLEND0_CONTROL */
-
- 0xc00e6900,
- 0x00000200,
- 0x00000000, /* DB_DEPTH_CONTROL */
- 0x00000000, /* DB_EQAA */
- 0x00cc0010, /* CB_COLOR_CONTROL */
- 0x00000210, /* DB_SHADER_CONTROL */
- 0x00010000, /* PA_CL_CLIP_CNTL */
- 0x00000004, /* PA_SU_SC_MODE_CNTL */
- 0x00000100, /* PA_CL_VTE_CNTL */
- 0x00000000, /* PA_CL_VS_OUT_CNTL */
- 0x00000000, /* PA_CL_NANINF_CNTL */
- 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
- 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
- 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
- 0x00000000, /* */
- 0x00000000, /* */
-
- 0xc0026900,
- 0x00000229,
- 0x00000000, /* SQ_PGM_START_FS */
- 0x00000000,
-
- 0xc0016900,
- 0x0000023b,
- 0x00000000, /* SQ_LDS_ALLOC_PS */
-
- 0xc0066900,
- 0x00000240,
- 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-
- 0xc0046900,
- 0x00000247,
- 0x00000000, /* SQ_GS_VERT_ITEMSIZE */
- 0x00000000,
- 0x00000000,
- 0x00000000,
-
- 0xc0116900,
- 0x00000280,
- 0x00000000, /* PA_SU_POINT_SIZE */
- 0x00000000, /* PA_SU_POINT_MINMAX */
- 0x00000008, /* PA_SU_LINE_CNTL */
- 0x00000000, /* PA_SC_LINE_STIPPLE */
- 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
- 0x00000000, /* VGT_HOS_CNTL */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000, /* VGT_GS_MODE */
-
- 0xc0026900,
- 0x00000292,
- 0x00000000, /* PA_SC_MODE_CNTL_0 */
- 0x00000000, /* PA_SC_MODE_CNTL_1 */
-
- 0xc0016900,
- 0x000002a1,
- 0x00000000, /* VGT_PRIMITIVEID_EN */
-
- 0xc0016900,
- 0x000002a5,
- 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
-
- 0xc0026900,
- 0x000002a8,
- 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
- 0x00000000,
-
- 0xc0026900,
- 0x000002ad,
- 0x00000000, /* VGT_REUSE_OFF */
- 0x00000000,
-
- 0xc0016900,
- 0x000002d5,
- 0x00000000, /* VGT_SHADER_STAGES_EN */
-
- 0xc0016900,
- 0x000002dc,
- 0x0000aa00, /* DB_ALPHA_TO_MASK */
-
- 0xc0066900,
- 0x000002de,
- 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-
- 0xc0026900,
- 0x000002e5,
- 0x00000000, /* VGT_STRMOUT_CONFIG */
- 0x00000000,
-
- 0xc01b6900,
- 0x000002f5,
- 0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
- 0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
- 0x00000000, /* PA_SC_LINE_CNTL */
- 0x00000000, /* PA_SC_AA_CONFIG */
- 0x00000005, /* PA_SU_VTX_CNTL */
- 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
- 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
- 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
- 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
- 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
- 0xffffffff,
-
- 0xc0026900,
- 0x00000316,
- 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
- 0x00000010, /* */
-};
-
-const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.h b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
index f5d0e9a60267..1dca73d9e005 100644
--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
@@ -20,16 +20,300 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
*/
#ifndef CAYMAN_BLIT_SHADERS_H
#define CAYMAN_BLIT_SHADERS_H
-extern const u32 cayman_ps[];
-extern const u32 cayman_vs[];
-extern const u32 cayman_default_state[];
+/*
+ * evergreen cards need to use the 3D engine to blit data, which requires
+ * quite a bit of hw state setup. Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables. The register state and shaders
+ * were hand generated to support blitting functionality. See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
+static const u32 cayman_default_state[] = {
+ 0xc0066900,
+ 0x00000000,
+ 0x00000060, /* DB_RENDER_CONTROL */
+ 0x00000000, /* DB_COUNT_CONTROL */
+ 0x00000000, /* DB_DEPTH_VIEW */
+ 0x0000002a, /* DB_RENDER_OVERRIDE */
+ 0x00000000, /* DB_RENDER_OVERRIDE2 */
+ 0x00000000, /* DB_HTILE_DATA_BASE */
+
+ 0xc0026900,
+ 0x0000000a,
+ 0x00000000, /* DB_STENCIL_CLEAR */
+ 0x00000000, /* DB_DEPTH_CLEAR */
+
+ 0xc0036900,
+ 0x0000000f,
+ 0x00000000, /* DB_DEPTH_INFO */
+ 0x00000000, /* DB_Z_INFO */
+ 0x00000000, /* DB_STENCIL_INFO */
+
+ 0xc0016900,
+ 0x00000080,
+ 0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+ 0xc00d6900,
+ 0x00000083,
+ 0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+ 0x00000000, /* PA_SC_CLIPRECT_0_TL */
+ 0x20002000, /* PA_SC_CLIPRECT_0_BR */
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0xaaaaaaaa, /* PA_SC_EDGERULE */
+ 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+ 0x0000000f, /* CB_TARGET_MASK */
+ 0x0000000f, /* CB_SHADER_MASK */
+
+ 0xc0226900,
+ 0x00000094,
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+ 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+ 0xc0016900,
+ 0x000000d4,
+ 0x00000000, /* SX_MISC */
+
+ 0xc0026900,
+ 0x000000d9,
+ 0x00000000, /* CP_RINGID */
+ 0x00000000, /* CP_VMID */
+
+ 0xc0096900,
+ 0x00000100,
+ 0x00ffffff, /* VGT_MAX_VTX_INDX */
+ 0x00000000, /* VGT_MIN_VTX_INDX */
+ 0x00000000, /* VGT_INDX_OFFSET */
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+ 0x00000000, /* SX_ALPHA_TEST_CONTROL */
+ 0x00000000, /* CB_BLEND_RED */
+ 0x00000000, /* CB_BLEND_GREEN */
+ 0x00000000, /* CB_BLEND_BLUE */
+ 0x00000000, /* CB_BLEND_ALPHA */
+
+ 0xc0016900,
+ 0x00000187,
+ 0x00000100, /* SPI_VS_OUT_ID_0 */
+
+ 0xc0026900,
+ 0x00000191,
+ 0x00000100, /* SPI_PS_INPUT_CNTL_0 */
+ 0x00000101, /* SPI_PS_INPUT_CNTL_1 */
+
+ 0xc0016900,
+ 0x000001b1,
+ 0x00000000, /* SPI_VS_OUT_CONFIG */
+
+ 0xc0106900,
+ 0x000001b3,
+ 0x20000001, /* SPI_PS_IN_CONTROL_0 */
+ 0x00000000, /* SPI_PS_IN_CONTROL_1 */
+ 0x00000000, /* SPI_INTERP_CONTROL_0 */
+ 0x00000000, /* SPI_INPUT_Z */
+ 0x00000000, /* SPI_FOG_CNTL */
+ 0x00100000, /* SPI_BARYC_CNTL */
+ 0x00000000, /* SPI_PS_IN_CONTROL_2 */
+ 0x00000000, /* SPI_COMPUTE_INPUT_CNTL */
+ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */
+ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */
+ 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */
+ 0x00000000, /* SPI_GPR_MGMT */
+ 0x00000000, /* SPI_LDS_MGMT */
+ 0x00000000, /* SPI_STACK_MGMT */
+ 0x00000000, /* SPI_WAVE_MGMT_1 */
+ 0x00000000, /* SPI_WAVE_MGMT_2 */
+
+ 0xc0016900,
+ 0x000001e0,
+ 0x00000000, /* CB_BLEND0_CONTROL */
+
+ 0xc00e6900,
+ 0x00000200,
+ 0x00000000, /* DB_DEPTH_CONTROL */
+ 0x00000000, /* DB_EQAA */
+ 0x00cc0010, /* CB_COLOR_CONTROL */
+ 0x00000210, /* DB_SHADER_CONTROL */
+ 0x00010000, /* PA_CL_CLIP_CNTL */
+ 0x00000004, /* PA_SU_SC_MODE_CNTL */
+ 0x00000100, /* PA_CL_VTE_CNTL */
+ 0x00000000, /* PA_CL_VS_OUT_CNTL */
+ 0x00000000, /* PA_CL_NANINF_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+ 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+
+ 0xc0026900,
+ 0x00000229,
+ 0x00000000, /* SQ_PGM_START_FS */
+ 0x00000000,
+
+ 0xc0016900,
+ 0x0000023b,
+ 0x00000000, /* SQ_LDS_ALLOC_PS */
+
+ 0xc0066900,
+ 0x00000240,
+ 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0046900,
+ 0x00000247,
+ 0x00000000, /* SQ_GS_VERT_ITEMSIZE */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0116900,
+ 0x00000280,
+ 0x00000000, /* PA_SU_POINT_SIZE */
+ 0x00000000, /* PA_SU_POINT_MINMAX */
+ 0x00000008, /* PA_SU_LINE_CNTL */
+ 0x00000000, /* PA_SC_LINE_STIPPLE */
+ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+ 0x00000000, /* VGT_HOS_CNTL */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* VGT_GS_MODE */
+
+ 0xc0026900,
+ 0x00000292,
+ 0x00000000, /* PA_SC_MODE_CNTL_0 */
+ 0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+ 0xc0016900,
+ 0x000002a1,
+ 0x00000000, /* VGT_PRIMITIVEID_EN */
+
+ 0xc0016900,
+ 0x000002a5,
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+ 0xc0026900,
+ 0x000002a8,
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+ 0x00000000,
+
+ 0xc0026900,
+ 0x000002ad,
+ 0x00000000, /* VGT_REUSE_OFF */
+ 0x00000000,
+
+ 0xc0016900,
+ 0x000002d5,
+ 0x00000000, /* VGT_SHADER_STAGES_EN */
+
+ 0xc0016900,
+ 0x000002dc,
+ 0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+ 0xc0066900,
+ 0x000002de,
+ 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0026900,
+ 0x000002e5,
+ 0x00000000, /* VGT_STRMOUT_CONFIG */
+ 0x00000000,
+
+ 0xc01b6900,
+ 0x000002f5,
+ 0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
+ 0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
+ 0x00000000, /* PA_SC_LINE_CNTL */
+ 0x00000000, /* PA_SC_AA_CONFIG */
+ 0x00000005, /* PA_SU_VTX_CNTL */
+ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
+ 0xffffffff,
+
+ 0xc0026900,
+ 0x00000316,
+ 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ 0x00000010, /* */
+};
-extern const u32 cayman_ps_size, cayman_vs_size;
-extern const u32 cayman_default_size;
+static const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
#endif
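
A note on the format of these tables: each group above is a Radeon PM4 type-3 packet, i.e. a header dword encoding an opcode and a payload length, then (for the SET_*_REG opcodes used here) one register-block offset dword followed by the register values. Below is a rough decoder; the bit layout is my reading of the driver's PKT3() macros, so treat the field positions and opcode names as assumptions rather than documented fact.

#include <stdint.h>
#include <stdio.h>

/*
 * Assumed layout:
 *   header[31:30] = packet type (3 for all packets in these tables)
 *   header[29:16] = count (payload dwords minus one)
 *   header[15:8]  = opcode (0x69 SET_CONTEXT_REG, 0x68 SET_CONFIG_REG, ...)
 */
static void decode_pm4(const uint32_t *state, size_t ndw)
{
	size_t i = 0;

	while (i < ndw) {
		uint32_t hdr    = state[i++];
		unsigned type   = hdr >> 30;
		unsigned count  = (hdr >> 16) & 0x3fff;
		unsigned opcode = (hdr >> 8) & 0xff;

		if (type != 3)
			break;	/* only type-3 packets expected here */

		/* for SET_*_REG the first payload dword is the reg offset */
		printf("opcode 0x%02x: offset 0x%04x, %u value(s)\n",
		       opcode, (unsigned)state[i], count);
		i += count + 1;	/* offset dword + count values */
	}
}

Running decode_pm4(cayman_default_state, cayman_default_size) would list every register group in the table above, which is a handy sanity check when such a table is edited by hand.
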
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
deleted file mode 100644
index 1a96ddb3e5ed..000000000000
--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- * Copyright 2010 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Alex Deucher <alexander.deucher@amd.com>
- */
-
-#include <linux/bug.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-/*
- * evergreen cards need to use the 3D engine to blit data which requires
- * quite a bit of hw state setup. Rather than pull the whole 3D driver
- * (which normally generates the 3D state) into the DRM, we opt to use
- * statically generated state tables. The register state and shaders
- * were hand generated to support blitting functionality. See the 3D
- * driver or documentation for descriptions of the registers and
- * shader instructions.
- */
-
-const u32 evergreen_default_state[] =
-{
- 0xc0016900,
- 0x0000023b,
- 0x00000000, /* SQ_LDS_ALLOC_PS */
-
- 0xc0066900,
- 0x00000240,
- 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-
- 0xc0046900,
- 0x00000247,
- 0x00000000, /* SQ_GS_VERT_ITEMSIZE */
- 0x00000000,
- 0x00000000,
- 0x00000000,
-
- 0xc0026900,
- 0x00000010,
- 0x00000000, /* DB_Z_INFO */
- 0x00000000, /* DB_STENCIL_INFO */
-
- 0xc0016900,
- 0x00000200,
- 0x00000000, /* DB_DEPTH_CONTROL */
-
- 0xc0066900,
- 0x00000000,
- 0x00000060, /* DB_RENDER_CONTROL */
- 0x00000000, /* DB_COUNT_CONTROL */
- 0x00000000, /* DB_DEPTH_VIEW */
- 0x0000002a, /* DB_RENDER_OVERRIDE */
- 0x00000000, /* DB_RENDER_OVERRIDE2 */
- 0x00000000, /* DB_HTILE_DATA_BASE */
-
- 0xc0026900,
- 0x0000000a,
- 0x00000000, /* DB_STENCIL_CLEAR */
- 0x00000000, /* DB_DEPTH_CLEAR */
-
- 0xc0016900,
- 0x000002dc,
- 0x0000aa00, /* DB_ALPHA_TO_MASK */
-
- 0xc0016900,
- 0x00000080,
- 0x00000000, /* PA_SC_WINDOW_OFFSET */
-
- 0xc00d6900,
- 0x00000083,
- 0x0000ffff, /* PA_SC_CLIPRECT_RULE */
- 0x00000000, /* PA_SC_CLIPRECT_0_TL */
- 0x20002000, /* PA_SC_CLIPRECT_0_BR */
- 0x00000000,
- 0x20002000,
- 0x00000000,
- 0x20002000,
- 0x00000000,
- 0x20002000,
- 0xaaaaaaaa, /* PA_SC_EDGERULE */
- 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
- 0x0000000f, /* CB_TARGET_MASK */
- 0x0000000f, /* CB_SHADER_MASK */
-
- 0xc0226900,
- 0x00000094,
- 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
- 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
- 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
-
- 0xc0016900,
- 0x000000d4,
- 0x00000000, /* SX_MISC */
-
- 0xc0026900,
- 0x00000292,
- 0x00000000, /* PA_SC_MODE_CNTL_0 */
- 0x00000000, /* PA_SC_MODE_CNTL_1 */
-
- 0xc0106900,
- 0x00000300,
- 0x00000000, /* PA_SC_LINE_CNTL */
- 0x00000000, /* PA_SC_AA_CONFIG */
- 0x00000005, /* PA_SU_VTX_CNTL */
- 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
- 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
- 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
- 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
- 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_0 */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_7 */
- 0xffffffff, /* PA_SC_AA_MASK */
-
- 0xc00d6900,
- 0x00000202,
- 0x00cc0010, /* CB_COLOR_CONTROL */
- 0x00000210, /* DB_SHADER_CONTROL */
- 0x00010000, /* PA_CL_CLIP_CNTL */
- 0x00000004, /* PA_SU_SC_MODE_CNTL */
- 0x00000100, /* PA_CL_VTE_CNTL */
- 0x00000000, /* PA_CL_VS_OUT_CNTL */
- 0x00000000, /* PA_CL_NANINF_CNTL */
- 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
- 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
- 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* SQ_DYN_GPR_RESOURCE_LIMIT_1 */
-
- 0xc0066900,
- 0x000002de,
- 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
-
- 0xc0016900,
- 0x00000229,
- 0x00000000, /* SQ_PGM_START_FS */
-
- 0xc0016900,
- 0x0000022a,
- 0x00000000, /* SQ_PGM_RESOURCES_FS */
-
- 0xc0096900,
- 0x00000100,
- 0x00ffffff, /* VGT_MAX_VTX_INDX */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* SX_ALPHA_TEST_CONTROL */
- 0x00000000, /* CB_BLEND_RED */
- 0x00000000, /* CB_BLEND_GREEN */
- 0x00000000, /* CB_BLEND_BLUE */
- 0x00000000, /* CB_BLEND_ALPHA */
-
- 0xc0026900,
- 0x000002a8,
- 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
- 0x00000000, /* */
-
- 0xc0026900,
- 0x000002ad,
- 0x00000000, /* VGT_REUSE_OFF */
- 0x00000000, /* */
-
- 0xc0116900,
- 0x00000280,
- 0x00000000, /* PA_SU_POINT_SIZE */
- 0x00000000, /* PA_SU_POINT_MINMAX */
- 0x00000008, /* PA_SU_LINE_CNTL */
- 0x00000000, /* PA_SC_LINE_STIPPLE */
- 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
- 0x00000000, /* VGT_HOS_CNTL */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* VGT_GS_MODE */
-
- 0xc0016900,
- 0x000002a1,
- 0x00000000, /* VGT_PRIMITIVEID_EN */
-
- 0xc0016900,
- 0x000002a5,
- 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
-
- 0xc0016900,
- 0x000002d5,
- 0x00000000, /* VGT_SHADER_STAGES_EN */
-
- 0xc0026900,
- 0x000002e5,
- 0x00000000, /* VGT_STRMOUT_CONFIG */
- 0x00000000, /* */
-
- 0xc0016900,
- 0x000001e0,
- 0x00000000, /* CB_BLEND0_CONTROL */
-
- 0xc0016900,
- 0x000001b1,
- 0x00000000, /* SPI_VS_OUT_CONFIG */
-
- 0xc0016900,
- 0x00000187,
- 0x00000000, /* SPI_VS_OUT_ID_0 */
-
- 0xc0016900,
- 0x00000191,
- 0x00000100, /* SPI_PS_INPUT_CNTL_0 */
-
- 0xc00b6900,
- 0x000001b3,
- 0x20000001, /* SPI_PS_IN_CONTROL_0 */
- 0x00000000, /* SPI_PS_IN_CONTROL_1 */
- 0x00000000, /* SPI_INTERP_CONTROL_0 */
- 0x00000000, /* SPI_INPUT_Z */
- 0x00000000, /* SPI_FOG_CNTL */
- 0x00100000, /* SPI_BARYC_CNTL */
- 0x00000000, /* SPI_PS_IN_CONTROL_2 */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
- 0x00000000, /* */
-
- 0xc0026900,
- 0x00000316,
- 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
- 0x00000010, /* */
-};
-
-const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.h b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h
index bb8d6c751595..4492524ee1df 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.h
@@ -20,16 +20,284 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
*/
#ifndef EVERGREEN_BLIT_SHADERS_H
#define EVERGREEN_BLIT_SHADERS_H
-extern const u32 evergreen_ps[];
-extern const u32 evergreen_vs[];
-extern const u32 evergreen_default_state[];
+/*
+ * evergreen cards need to use the 3D engine to blit data which requires
+ * quite a bit of hw state setup. Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables. The register state and shaders
+ * were hand generated to support blitting functionality. See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
+
+static const u32 evergreen_default_state[] = {
+ 0xc0016900,
+ 0x0000023b,
+ 0x00000000, /* SQ_LDS_ALLOC_PS */
+
+ 0xc0066900,
+ 0x00000240,
+ 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0046900,
+ 0x00000247,
+ 0x00000000, /* SQ_GS_VERT_ITEMSIZE */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0026900,
+ 0x00000010,
+ 0x00000000, /* DB_Z_INFO */
+ 0x00000000, /* DB_STENCIL_INFO */
+
+ 0xc0016900,
+ 0x00000200,
+ 0x00000000, /* DB_DEPTH_CONTROL */
+
+ 0xc0066900,
+ 0x00000000,
+ 0x00000060, /* DB_RENDER_CONTROL */
+ 0x00000000, /* DB_COUNT_CONTROL */
+ 0x00000000, /* DB_DEPTH_VIEW */
+ 0x0000002a, /* DB_RENDER_OVERRIDE */
+ 0x00000000, /* DB_RENDER_OVERRIDE2 */
+ 0x00000000, /* DB_HTILE_DATA_BASE */
+
+ 0xc0026900,
+ 0x0000000a,
+ 0x00000000, /* DB_STENCIL_CLEAR */
+ 0x00000000, /* DB_DEPTH_CLEAR */
+
+ 0xc0016900,
+ 0x000002dc,
+ 0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+ 0xc0016900,
+ 0x00000080,
+ 0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+ 0xc00d6900,
+ 0x00000083,
+ 0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+ 0x00000000, /* PA_SC_CLIPRECT_0_TL */
+ 0x20002000, /* PA_SC_CLIPRECT_0_BR */
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0xaaaaaaaa, /* PA_SC_EDGERULE */
+ 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+ 0x0000000f, /* CB_TARGET_MASK */
+ 0x0000000f, /* CB_SHADER_MASK */
+
+ 0xc0226900,
+ 0x00000094,
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+ 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+ 0xc0016900,
+ 0x000000d4,
+ 0x00000000, /* SX_MISC */
+
+ 0xc0026900,
+ 0x00000292,
+ 0x00000000, /* PA_SC_MODE_CNTL_0 */
+ 0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+ 0xc0106900,
+ 0x00000300,
+ 0x00000000, /* PA_SC_LINE_CNTL */
+ 0x00000000, /* PA_SC_AA_CONFIG */
+ 0x00000005, /* PA_SU_VTX_CNTL */
+ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_0 */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_7 */
+ 0xffffffff, /* PA_SC_AA_MASK */
+
+ 0xc00d6900,
+ 0x00000202,
+ 0x00cc0010, /* CB_COLOR_CONTROL */
+ 0x00000210, /* DB_SHADER_CONTROL */
+ 0x00010000, /* PA_CL_CLIP_CNTL */
+ 0x00000004, /* PA_SU_SC_MODE_CNTL */
+ 0x00000100, /* PA_CL_VTE_CNTL */
+ 0x00000000, /* PA_CL_VS_OUT_CNTL */
+ 0x00000000, /* PA_CL_NANINF_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+ 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* SQ_DYN_GPR_RESOURCE_LIMIT_1 */
+
+ 0xc0066900,
+ 0x000002de,
+ 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+
+ 0xc0016900,
+ 0x00000229,
+ 0x00000000, /* SQ_PGM_START_FS */
+
+ 0xc0016900,
+ 0x0000022a,
+ 0x00000000, /* SQ_PGM_RESOURCES_FS */
+
+ 0xc0096900,
+ 0x00000100,
+ 0x00ffffff, /* VGT_MAX_VTX_INDX */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* SX_ALPHA_TEST_CONTROL */
+ 0x00000000, /* CB_BLEND_RED */
+ 0x00000000, /* CB_BLEND_GREEN */
+ 0x00000000, /* CB_BLEND_BLUE */
+ 0x00000000, /* CB_BLEND_ALPHA */
+
+ 0xc0026900,
+ 0x000002a8,
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+ 0x00000000, /* */
+
+ 0xc0026900,
+ 0x000002ad,
+ 0x00000000, /* VGT_REUSE_OFF */
+ 0x00000000, /* */
+
+ 0xc0116900,
+ 0x00000280,
+ 0x00000000, /* PA_SU_POINT_SIZE */
+ 0x00000000, /* PA_SU_POINT_MINMAX */
+ 0x00000008, /* PA_SU_LINE_CNTL */
+ 0x00000000, /* PA_SC_LINE_STIPPLE */
+ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+ 0x00000000, /* VGT_HOS_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* VGT_GS_MODE */
+
+ 0xc0016900,
+ 0x000002a1,
+ 0x00000000, /* VGT_PRIMITIVEID_EN */
+
+ 0xc0016900,
+ 0x000002a5,
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+ 0xc0016900,
+ 0x000002d5,
+ 0x00000000, /* VGT_SHADER_STAGES_EN */
+
+ 0xc0026900,
+ 0x000002e5,
+ 0x00000000, /* VGT_STRMOUT_CONFIG */
+ 0x00000000, /* */
+
+ 0xc0016900,
+ 0x000001e0,
+ 0x00000000, /* CB_BLEND0_CONTROL */
+
+ 0xc0016900,
+ 0x000001b1,
+ 0x00000000, /* SPI_VS_OUT_CONFIG */
+
+ 0xc0016900,
+ 0x00000187,
+ 0x00000000, /* SPI_VS_OUT_ID_0 */
+
+ 0xc0016900,
+ 0x00000191,
+ 0x00000100, /* SPI_PS_INPUT_CNTL_0 */
+
+ 0xc00b6900,
+ 0x000001b3,
+ 0x20000001, /* SPI_PS_IN_CONTROL_0 */
+ 0x00000000, /* SPI_PS_IN_CONTROL_1 */
+ 0x00000000, /* SPI_INTERP_CONTROL_0 */
+ 0x00000000, /* SPI_INPUT_Z */
+ 0x00000000, /* SPI_FOG_CNTL */
+ 0x00100000, /* SPI_BARYC_CNTL */
+ 0x00000000, /* SPI_PS_IN_CONTROL_2 */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+ 0x00000000, /* */
+
+ 0xc0026900,
+ 0x00000316,
+ 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ 0x00000010, /* */
+};
-extern const u32 evergreen_ps_size, evergreen_vs_size;
-extern const u32 evergreen_default_size;
+static const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
#endif
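
The conversion pattern of this series is visible here: each state table moves out of its .c file into the header as static const data, and the exported *_size variable becomes an ARRAY_SIZE() of the table, so the recorded length can never drift from the data again. A minimal standalone model of the idiom, using a local stand-in for the kernel macro (the real ARRAY_SIZE() adds a must-be-an-array type check omitted here):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int demo_state[] = { 0xc0016900, 0x000000d4, 0x00000000 };
static const unsigned int demo_size = ARRAY_SIZE(demo_state);

int main(void)
{
	printf("%u dwords\n", demo_size);	/* 3, computed by the compiler */
	return 0;
}

The trade-off of static data in a header is one copy per includer; that looks harmless here since each of these tables appears to have a single user.
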
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
deleted file mode 100644
index 443cbe59b274..000000000000
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ /dev/null
@@ -1,719 +0,0 @@
-/*
- * Copyright 2009 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Alex Deucher <alexander.deucher@amd.com>
- */
-
-#include <linux/bug.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-/*
- * R6xx+ cards need to use the 3D engine to blit data which requires
- * quite a bit of hw state setup. Rather than pull the whole 3D driver
- * (which normally generates the 3D state) into the DRM, we opt to use
- * statically generated state tables. The register state and shaders
- * were hand generated to support blitting functionality. See the 3D
- * driver or documentation for descriptions of the registers and
- * shader instructions.
- */
-
-const u32 r6xx_default_state[] =
-{
- 0xc0002400, /* START_3D_CMDBUF */
- 0x00000000,
-
- 0xc0012800, /* CONTEXT_CONTROL */
- 0x80000000,
- 0x80000000,
-
- 0xc0016800,
- 0x00000010,
- 0x00008000, /* WAIT_UNTIL */
-
- 0xc0016800,
- 0x00000542,
- 0x07000003, /* TA_CNTL_AUX */
-
- 0xc0016800,
- 0x000005c5,
- 0x00000000, /* VC_ENHANCE */
-
- 0xc0016800,
- 0x00000363,
- 0x00000000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */
-
- 0xc0016800,
- 0x0000060c,
- 0x82000000, /* DB_DEBUG */
-
- 0xc0016800,
- 0x0000060e,
- 0x01020204, /* DB_WATERMARKS */
-
- 0xc0026f00,
- 0x00000000,
- 0x00000000, /* SQ_VTX_BASE_VTX_LOC */
- 0x00000000, /* SQ_VTX_START_INST_LOC */
-
- 0xc0096900,
- 0x0000022a,
- 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-
- 0xc0016900,
- 0x00000004,
- 0x00000000, /* DB_DEPTH_INFO */
-
- 0xc0026900,
- 0x0000000a,
- 0x00000000, /* DB_STENCIL_CLEAR */
- 0x00000000, /* DB_DEPTH_CLEAR */
-
- 0xc0016900,
- 0x00000200,
- 0x00000000, /* DB_DEPTH_CONTROL */
-
- 0xc0026900,
- 0x00000343,
- 0x00000060, /* DB_RENDER_CONTROL */
- 0x00000040, /* DB_RENDER_OVERRIDE */
-
- 0xc0016900,
- 0x00000351,
- 0x0000aa00, /* DB_ALPHA_TO_MASK */
-
- 0xc00f6900,
- 0x00000100,
- 0x00000800, /* VGT_MAX_VTX_INDX */
- 0x00000000, /* VGT_MIN_VTX_INDX */
- 0x00000000, /* VGT_INDX_OFFSET */
- 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
- 0x00000000, /* SX_ALPHA_TEST_CONTROL */
- 0x00000000, /* CB_BLEND_RED */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000, /* CB_FOG_RED */
- 0x00000000,
- 0x00000000,
- 0x00000000, /* DB_STENCILREFMASK */
- 0x00000000, /* DB_STENCILREFMASK_BF */
- 0x00000000, /* SX_ALPHA_REF */
-
- 0xc0046900,
- 0x0000030c,
- 0x01000000, /* CB_CLRCMP_CNTL */
- 0x00000000,
- 0x00000000,
- 0x00000000,
-
- 0xc0046900,
- 0x00000048,
- 0x3f800000, /* CB_CLEAR_RED */
- 0x00000000,
- 0x3f800000,
- 0x3f800000,
-
- 0xc0016900,
- 0x00000080,
- 0x00000000, /* PA_SC_WINDOW_OFFSET */
-
- 0xc00a6900,
- 0x00000083,
- 0x0000ffff, /* PA_SC_CLIP_RECT_RULE */
- 0x00000000, /* PA_SC_CLIPRECT_0_TL */
- 0x20002000,
- 0x00000000,
- 0x20002000,
- 0x00000000,
- 0x20002000,
- 0x00000000,
- 0x20002000,
- 0x00000000, /* PA_SC_EDGERULE */
-
- 0xc0406900,
- 0x00000094,
- 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
- 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
- 0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
-
- 0xc0026900,
- 0x00000292,
- 0x00000000, /* PA_SC_MPASS_PS_CNTL */
- 0x00004010, /* PA_SC_MODE_CNTL */
-
- 0xc0096900,
- 0x00000300,
- 0x00000000, /* PA_SC_LINE_CNTL */
- 0x00000000, /* PA_SC_AA_CONFIG */
- 0x0000002d, /* PA_SU_VTX_CNTL */
- 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
- 0x3f800000,
- 0x3f800000,
- 0x3f800000,
- 0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */
- 0x00000000,
-
- 0xc0016900,
- 0x00000312,
- 0xffffffff, /* PA_SC_AA_MASK */
-
- 0xc0066900,
- 0x0000037e,
- 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
- 0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */
- 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */
- 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */
- 0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */
- 0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */
-
- 0xc0046900,
- 0x000001b6,
- 0x00000000, /* SPI_INPUT_Z */
- 0x00000000, /* SPI_FOG_CNTL */
- 0x00000000, /* SPI_FOG_FUNC_SCALE */
- 0x00000000, /* SPI_FOG_FUNC_BIAS */
-
- 0xc0016900,
- 0x00000225,
- 0x00000000, /* SQ_PGM_START_FS */
-
- 0xc0016900,
- 0x00000229,
- 0x00000000, /* SQ_PGM_RESOURCES_FS */
-
- 0xc0016900,
- 0x00000237,
- 0x00000000, /* SQ_PGM_CF_OFFSET_FS */
-
- 0xc0026900,
- 0x000002a8,
- 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
- 0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */
-
- 0xc0116900,
- 0x00000280,
- 0x00000000, /* PA_SU_POINT_SIZE */
- 0x00000000, /* PA_SU_POINT_MINMAX */
- 0x00000008, /* PA_SU_LINE_CNTL */
- 0x00000000, /* PA_SC_LINE_STIPPLE */
- 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
- 0x00000000, /* VGT_HOS_CNTL */
- 0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */
- 0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */
- 0x00000000, /* VGT_HOS_REUSE_DEPTH */
- 0x00000000, /* VGT_GROUP_PRIM_TYPE */
- 0x00000000, /* VGT_GROUP_FIRST_DECR */
- 0x00000000, /* VGT_GROUP_DECR */
- 0x00000000, /* VGT_GROUP_VECT_0_CNTL */
- 0x00000000, /* VGT_GROUP_VECT_1_CNTL */
- 0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */
- 0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */
- 0x00000000, /* VGT_GS_MODE */
-
- 0xc0016900,
- 0x000002a1,
- 0x00000000, /* VGT_PRIMITIVEID_EN */
-
- 0xc0016900,
- 0x000002a5,
- 0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */
-
- 0xc0036900,
- 0x000002ac,
- 0x00000000, /* VGT_STRMOUT_EN */
- 0x00000000, /* VGT_REUSE_OFF */
- 0x00000000, /* VGT_VTX_CNT_EN */
-
- 0xc0016900,
- 0x000000d4,
- 0x00000000, /* SX_MISC */
-
- 0xc0016900,
- 0x000002c8,
- 0x00000000, /* VGT_STRMOUT_BUFFER_EN */
-
- 0xc0076900,
- 0x00000202,
- 0x00cc0000, /* CB_COLOR_CONTROL */
- 0x00000210, /* DB_SHADER_CNTL */
- 0x00010000, /* PA_CL_CLIP_CNTL */
- 0x00000244, /* PA_SU_SC_MODE_CNTL */
- 0x00000100, /* PA_CL_VTE_CNTL */
- 0x00000000, /* PA_CL_VS_OUT_CNTL */
- 0x00000000, /* PA_CL_NANINF_CNTL */
-
- 0xc0026900,
- 0x0000008e,
- 0x0000000f, /* CB_TARGET_MASK */
- 0x0000000f, /* CB_SHADER_MASK */
-
- 0xc0016900,
- 0x000001e8,
- 0x00000001, /* CB_SHADER_CONTROL */
-
- 0xc0016900,
- 0x00000185,
- 0x00000000, /* SPI_VS_OUT_ID_0 */
-
- 0xc0016900,
- 0x00000191,
- 0x00000b00, /* SPI_PS_INPUT_CNTL_0 */
-
- 0xc0056900,
- 0x000001b1,
- 0x00000000, /* SPI_VS_OUT_CONFIG */
- 0x00000000, /* SPI_THREAD_GROUPING */
- 0x00000001, /* SPI_PS_IN_CONTROL_0 */
- 0x00000000, /* SPI_PS_IN_CONTROL_1 */
- 0x00000000, /* SPI_INTERP_CONTROL_0 */
-
- 0xc0036e00, /* SET_SAMPLER */
- 0x00000000,
- 0x00000012,
- 0x00000000,
- 0x00000000,
-};
-
-const u32 r7xx_default_state[] =
-{
- 0xc0012800, /* CONTEXT_CONTROL */
- 0x80000000,
- 0x80000000,
-
- 0xc0016800,
- 0x00000010,
- 0x00008000, /* WAIT_UNTIL */
-
- 0xc0016800,
- 0x00000542,
- 0x07000002, /* TA_CNTL_AUX */
-
- 0xc0016800,
- 0x000005c5,
- 0x00000000, /* VC_ENHANCE */
-
- 0xc0016800,
- 0x00000363,
- 0x00004000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */
-
- 0xc0016800,
- 0x0000060c,
- 0x00000000, /* DB_DEBUG */
-
- 0xc0016800,
- 0x0000060e,
- 0x00420204, /* DB_WATERMARKS */
-
- 0xc0026f00,
- 0x00000000,
- 0x00000000, /* SQ_VTX_BASE_VTX_LOC */
- 0x00000000, /* SQ_VTX_START_INST_LOC */
-
- 0xc0096900,
- 0x0000022a,
- 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-
- 0xc0016900,
- 0x00000004,
- 0x00000000, /* DB_DEPTH_INFO */
-
- 0xc0026900,
- 0x0000000a,
- 0x00000000, /* DB_STENCIL_CLEAR */
- 0x00000000, /* DB_DEPTH_CLEAR */
-
- 0xc0016900,
- 0x00000200,
- 0x00000000, /* DB_DEPTH_CONTROL */
-
- 0xc0026900,
- 0x00000343,
- 0x00000060, /* DB_RENDER_CONTROL */
- 0x00000000, /* DB_RENDER_OVERRIDE */
-
- 0xc0016900,
- 0x00000351,
- 0x0000aa00, /* DB_ALPHA_TO_MASK */
-
- 0xc0096900,
- 0x00000100,
- 0x00000800, /* VGT_MAX_VTX_INDX */
- 0x00000000, /* VGT_MIN_VTX_INDX */
- 0x00000000, /* VGT_INDX_OFFSET */
- 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
- 0x00000000, /* SX_ALPHA_TEST_CONTROL */
- 0x00000000, /* CB_BLEND_RED */
- 0x00000000,
- 0x00000000,
- 0x00000000,
-
- 0xc0036900,
- 0x0000010c,
- 0x00000000, /* DB_STENCILREFMASK */
- 0x00000000, /* DB_STENCILREFMASK_BF */
- 0x00000000, /* SX_ALPHA_REF */
-
- 0xc0046900,
- 0x0000030c, /* CB_CLRCMP_CNTL */
- 0x01000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-
- 0xc0016900,
- 0x00000080,
- 0x00000000, /* PA_SC_WINDOW_OFFSET */
-
- 0xc00a6900,
- 0x00000083,
- 0x0000ffff, /* PA_SC_CLIP_RECT_RULE */
- 0x00000000, /* PA_SC_CLIPRECT_0_TL */
- 0x20002000,
- 0x00000000,
- 0x20002000,
- 0x00000000,
- 0x20002000,
- 0x00000000,
- 0x20002000,
- 0xaaaaaaaa, /* PA_SC_EDGERULE */
-
- 0xc0406900,
- 0x00000094,
- 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
- 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
- 0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
- 0x00000000,
- 0x3f800000,
-
- 0xc0026900,
- 0x00000292,
- 0x00000000, /* PA_SC_MPASS_PS_CNTL */
- 0x00514000, /* PA_SC_MODE_CNTL */
-
- 0xc0096900,
- 0x00000300,
- 0x00000000, /* PA_SC_LINE_CNTL */
- 0x00000000, /* PA_SC_AA_CONFIG */
- 0x0000002d, /* PA_SU_VTX_CNTL */
- 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
- 0x3f800000,
- 0x3f800000,
- 0x3f800000,
- 0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */
- 0x00000000,
-
- 0xc0016900,
- 0x00000312,
- 0xffffffff, /* PA_SC_AA_MASK */
-
- 0xc0066900,
- 0x0000037e,
- 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
- 0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */
- 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */
- 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */
- 0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */
- 0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */
-
- 0xc0046900,
- 0x000001b6,
- 0x00000000, /* SPI_INPUT_Z */
- 0x00000000, /* SPI_FOG_CNTL */
- 0x00000000, /* SPI_FOG_FUNC_SCALE */
- 0x00000000, /* SPI_FOG_FUNC_BIAS */
-
- 0xc0016900,
- 0x00000225,
- 0x00000000, /* SQ_PGM_START_FS */
-
- 0xc0016900,
- 0x00000229,
- 0x00000000, /* SQ_PGM_RESOURCES_FS */
-
- 0xc0016900,
- 0x00000237,
- 0x00000000, /* SQ_PGM_CF_OFFSET_FS */
-
- 0xc0026900,
- 0x000002a8,
- 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
- 0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */
-
- 0xc0116900,
- 0x00000280,
- 0x00000000, /* PA_SU_POINT_SIZE */
- 0x00000000, /* PA_SU_POINT_MINMAX */
- 0x00000008, /* PA_SU_LINE_CNTL */
- 0x00000000, /* PA_SC_LINE_STIPPLE */
- 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
- 0x00000000, /* VGT_HOS_CNTL */
- 0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */
- 0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */
- 0x00000000, /* VGT_HOS_REUSE_DEPTH */
- 0x00000000, /* VGT_GROUP_PRIM_TYPE */
- 0x00000000, /* VGT_GROUP_FIRST_DECR */
- 0x00000000, /* VGT_GROUP_DECR */
- 0x00000000, /* VGT_GROUP_VECT_0_CNTL */
- 0x00000000, /* VGT_GROUP_VECT_1_CNTL */
- 0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */
- 0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */
- 0x00000000, /* VGT_GS_MODE */
-
- 0xc0016900,
- 0x000002a1,
- 0x00000000, /* VGT_PRIMITIVEID_EN */
-
- 0xc0016900,
- 0x000002a5,
- 0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */
-
- 0xc0036900,
- 0x000002ac,
- 0x00000000, /* VGT_STRMOUT_EN */
- 0x00000000, /* VGT_REUSE_OFF */
- 0x00000000, /* VGT_VTX_CNT_EN */
-
- 0xc0016900,
- 0x000000d4,
- 0x00000000, /* SX_MISC */
-
- 0xc0016900,
- 0x000002c8,
- 0x00000000, /* VGT_STRMOUT_BUFFER_EN */
-
- 0xc0076900,
- 0x00000202,
- 0x00cc0000, /* CB_COLOR_CONTROL */
- 0x00000210, /* DB_SHADER_CNTL */
- 0x00010000, /* PA_CL_CLIP_CNTL */
- 0x00000244, /* PA_SU_SC_MODE_CNTL */
- 0x00000100, /* PA_CL_VTE_CNTL */
- 0x00000000, /* PA_CL_VS_OUT_CNTL */
- 0x00000000, /* PA_CL_NANINF_CNTL */
-
- 0xc0026900,
- 0x0000008e,
- 0x0000000f, /* CB_TARGET_MASK */
- 0x0000000f, /* CB_SHADER_MASK */
-
- 0xc0016900,
- 0x000001e8,
- 0x00000001, /* CB_SHADER_CONTROL */
-
- 0xc0016900,
- 0x00000185,
- 0x00000000, /* SPI_VS_OUT_ID_0 */
-
- 0xc0016900,
- 0x00000191,
- 0x00000b00, /* SPI_PS_INPUT_CNTL_0 */
-
- 0xc0056900,
- 0x000001b1,
- 0x00000000, /* SPI_VS_OUT_CONFIG */
- 0x00000001, /* SPI_THREAD_GROUPING */
- 0x00000001, /* SPI_PS_IN_CONTROL_0 */
- 0x00000000, /* SPI_PS_IN_CONTROL_1 */
- 0x00000000, /* SPI_INTERP_CONTROL_0 */
-
- 0xc0036e00, /* SET_SAMPLER */
- 0x00000000,
- 0x00000012,
- 0x00000000,
- 0x00000000,
-};
-
-/* same for r6xx/r7xx */
-const u32 r6xx_vs[] =
-{
- 0x00000004,
- 0x81000000,
- 0x0000203c,
- 0x94000b08,
- 0x00004000,
- 0x14200b1a,
- 0x00000000,
- 0x00000000,
- 0x3c000000,
- 0x68cd1000,
-#ifdef __BIG_ENDIAN
- 0x000a0000,
-#else
- 0x00080000,
-#endif
- 0x00000000,
-};
-
-const u32 r6xx_ps[] =
-{
- 0x00000002,
- 0x80800000,
- 0x00000000,
- 0x94200688,
- 0x00000010,
- 0x000d1000,
- 0xb0800000,
- 0x00000000,
-};
-
-const u32 r6xx_ps_size = ARRAY_SIZE(r6xx_ps);
-const u32 r6xx_vs_size = ARRAY_SIZE(r6xx_vs);
-const u32 r6xx_default_size = ARRAY_SIZE(r6xx_default_state);
-const u32 r7xx_default_size = ARRAY_SIZE(r7xx_default_state);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index c67b6ddb29a4..e765abcb3b01 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1629,7 +1629,7 @@ int radeon_pm_late_init(struct radeon_device *rdev)
ret = device_create_file(rdev->dev, &dev_attr_power_method);
if (ret)
DRM_ERROR("failed to create device file for power method\n");
- if (!ret)
+ else
rdev->pm.sysfs_initialized = true;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
index 49bbb2266c0f..6416f129e090 100644
--- a/drivers/gpu/drm/radeon/radeon_sync.c
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -96,7 +96,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
struct dma_fence *f;
int r = 0;
- dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(shared), f) {
+ dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(!shared), f) {
fence = to_radeon_fence(f);
if (fence && fence->rdev == rdev)
radeon_sync_fence(sync, fence);
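
The one-character fix above hinges on dma_resv_usage_rw() taking a "will I write?" flag, while radeon's shared parameter means roughly the opposite ("this job only reads the buffer, so sync to writers only"), hence the inversion. A sketch of the mapping as I understand the dma-resv API of this period; the enum ordering and the helper body are from memory, not copied from dma-resv.h:

#include <stdbool.h>

enum dma_resv_usage {
	DMA_RESV_USAGE_KERNEL,		/* strongest */
	DMA_RESV_USAGE_WRITE,
	DMA_RESV_USAGE_READ,
	DMA_RESV_USAGE_BOOKKEEP,	/* weakest */
};

/*
 * Iterating a reservation at usage U visits every fence with usage <= U.
 * A writer must also wait for readers, so it asks for READ; a reader only
 * needs the writers, so it asks for WRITE.
 */
static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
{
	return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
}

With shared == true meaning read-only access, the corrected dma_resv_usage_rw(!shared) selects WRITE and therefore waits on writers only, which appears to be what the shared flag always meant.
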
diff --git a/drivers/gpu/drm/radeon/si_blit_shaders.c b/drivers/gpu/drm/radeon/si_blit_shaders.c
deleted file mode 100644
index ec415e7dfa4b..000000000000
--- a/drivers/gpu/drm/radeon/si_blit_shaders.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Copyright 2011 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Alex Deucher <alexander.deucher@amd.com>
- */
-
-#include <linux/types.h>
-#include <linux/bug.h>
-#include <linux/kernel.h>
-
-const u32 si_default_state[] =
-{
- 0xc0066900,
- 0x00000000,
- 0x00000060, /* DB_RENDER_CONTROL */
- 0x00000000, /* DB_COUNT_CONTROL */
- 0x00000000, /* DB_DEPTH_VIEW */
- 0x0000002a, /* DB_RENDER_OVERRIDE */
- 0x00000000, /* DB_RENDER_OVERRIDE2 */
- 0x00000000, /* DB_HTILE_DATA_BASE */
-
- 0xc0046900,
- 0x00000008,
- 0x00000000, /* DB_DEPTH_BOUNDS_MIN */
- 0x00000000, /* DB_DEPTH_BOUNDS_MAX */
- 0x00000000, /* DB_STENCIL_CLEAR */
- 0x00000000, /* DB_DEPTH_CLEAR */
-
- 0xc0036900,
- 0x0000000f,
- 0x00000000, /* DB_DEPTH_INFO */
- 0x00000000, /* DB_Z_INFO */
- 0x00000000, /* DB_STENCIL_INFO */
-
- 0xc0016900,
- 0x00000080,
- 0x00000000, /* PA_SC_WINDOW_OFFSET */
-
- 0xc00d6900,
- 0x00000083,
- 0x0000ffff, /* PA_SC_CLIPRECT_RULE */
- 0x00000000, /* PA_SC_CLIPRECT_0_TL */
- 0x20002000, /* PA_SC_CLIPRECT_0_BR */
- 0x00000000,
- 0x20002000,
- 0x00000000,
- 0x20002000,
- 0x00000000,
- 0x20002000,
- 0xaaaaaaaa, /* PA_SC_EDGERULE */
- 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
- 0x0000000f, /* CB_TARGET_MASK */
- 0x0000000f, /* CB_SHADER_MASK */
-
- 0xc0226900,
- 0x00000094,
- 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
- 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x80000000,
- 0x20002000,
- 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
- 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
-
- 0xc0026900,
- 0x000000d9,
- 0x00000000, /* CP_RINGID */
- 0x00000000, /* CP_VMID */
-
- 0xc0046900,
- 0x00000100,
- 0xffffffff, /* VGT_MAX_VTX_INDX */
- 0x00000000, /* VGT_MIN_VTX_INDX */
- 0x00000000, /* VGT_INDX_OFFSET */
- 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
-
- 0xc0046900,
- 0x00000105,
- 0x00000000, /* CB_BLEND_RED */
- 0x00000000, /* CB_BLEND_GREEN */
- 0x00000000, /* CB_BLEND_BLUE */
- 0x00000000, /* CB_BLEND_ALPHA */
-
- 0xc0016900,
- 0x000001e0,
- 0x00000000, /* CB_BLEND0_CONTROL */
-
- 0xc00e6900,
- 0x00000200,
- 0x00000000, /* DB_DEPTH_CONTROL */
- 0x00000000, /* DB_EQAA */
- 0x00cc0010, /* CB_COLOR_CONTROL */
- 0x00000210, /* DB_SHADER_CONTROL */
- 0x00010000, /* PA_CL_CLIP_CNTL */
- 0x00000004, /* PA_SU_SC_MODE_CNTL */
- 0x00000100, /* PA_CL_VTE_CNTL */
- 0x00000000, /* PA_CL_VS_OUT_CNTL */
- 0x00000000, /* PA_CL_NANINF_CNTL */
- 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
- 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
- 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
- 0x00000000, /* */
- 0x00000000, /* */
-
- 0xc0116900,
- 0x00000280,
- 0x00000000, /* PA_SU_POINT_SIZE */
- 0x00000000, /* PA_SU_POINT_MINMAX */
- 0x00000008, /* PA_SU_LINE_CNTL */
- 0x00000000, /* PA_SC_LINE_STIPPLE */
- 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
- 0x00000000, /* VGT_HOS_CNTL */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000, /* VGT_GS_MODE */
-
- 0xc0026900,
- 0x00000292,
- 0x00000000, /* PA_SC_MODE_CNTL_0 */
- 0x00000000, /* PA_SC_MODE_CNTL_1 */
-
- 0xc0016900,
- 0x000002a1,
- 0x00000000, /* VGT_PRIMITIVEID_EN */
-
- 0xc0016900,
- 0x000002a5,
- 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
-
- 0xc0026900,
- 0x000002a8,
- 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
- 0x00000000,
-
- 0xc0026900,
- 0x000002ad,
- 0x00000000, /* VGT_REUSE_OFF */
- 0x00000000,
-
- 0xc0016900,
- 0x000002d5,
- 0x00000000, /* VGT_SHADER_STAGES_EN */
-
- 0xc0016900,
- 0x000002dc,
- 0x0000aa00, /* DB_ALPHA_TO_MASK */
-
- 0xc0066900,
- 0x000002de,
- 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-
- 0xc0026900,
- 0x000002e5,
- 0x00000000, /* VGT_STRMOUT_CONFIG */
- 0x00000000,
-
- 0xc01b6900,
- 0x000002f5,
- 0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
- 0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
- 0x00000000, /* PA_SC_LINE_CNTL */
- 0x00000000, /* PA_SC_AA_CONFIG */
- 0x00000005, /* PA_SU_VTX_CNTL */
- 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
- 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
- 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
- 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
- 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
- 0xffffffff,
-
- 0xc0026900,
- 0x00000316,
- 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
- 0x00000010, /* */
-};
-
-const u32 si_default_size = ARRAY_SIZE(si_default_state);
diff --git a/drivers/gpu/drm/radeon/si_blit_shaders.h b/drivers/gpu/drm/radeon/si_blit_shaders.h
index c739e51e3961..829a2b6228b7 100644
--- a/drivers/gpu/drm/radeon/si_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/si_blit_shaders.h
@@ -25,8 +25,227 @@
#ifndef SI_BLIT_SHADERS_H
#define SI_BLIT_SHADERS_H
-extern const u32 si_default_state[];
+static const u32 si_default_state[] = {
+ 0xc0066900,
+ 0x00000000,
+ 0x00000060, /* DB_RENDER_CONTROL */
+ 0x00000000, /* DB_COUNT_CONTROL */
+ 0x00000000, /* DB_DEPTH_VIEW */
+ 0x0000002a, /* DB_RENDER_OVERRIDE */
+ 0x00000000, /* DB_RENDER_OVERRIDE2 */
+ 0x00000000, /* DB_HTILE_DATA_BASE */
-extern const u32 si_default_size;
+ 0xc0046900,
+ 0x00000008,
+ 0x00000000, /* DB_DEPTH_BOUNDS_MIN */
+ 0x00000000, /* DB_DEPTH_BOUNDS_MAX */
+ 0x00000000, /* DB_STENCIL_CLEAR */
+ 0x00000000, /* DB_DEPTH_CLEAR */
+
+ 0xc0036900,
+ 0x0000000f,
+ 0x00000000, /* DB_DEPTH_INFO */
+ 0x00000000, /* DB_Z_INFO */
+ 0x00000000, /* DB_STENCIL_INFO */
+
+ 0xc0016900,
+ 0x00000080,
+ 0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+ 0xc00d6900,
+ 0x00000083,
+ 0x0000ffff, /* PA_SC_CLIPRECT_RULE */
+ 0x00000000, /* PA_SC_CLIPRECT_0_TL */
+ 0x20002000, /* PA_SC_CLIPRECT_0_BR */
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0x00000000,
+ 0x20002000,
+ 0xaaaaaaaa, /* PA_SC_EDGERULE */
+ 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
+ 0x0000000f, /* CB_TARGET_MASK */
+ 0x0000000f, /* CB_SHADER_MASK */
+
+ 0xc0226900,
+ 0x00000094,
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x80000000,
+ 0x20002000,
+ 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
+ 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
+
+ 0xc0026900,
+ 0x000000d9,
+ 0x00000000, /* CP_RINGID */
+ 0x00000000, /* CP_VMID */
+
+ 0xc0046900,
+ 0x00000100,
+ 0xffffffff, /* VGT_MAX_VTX_INDX */
+ 0x00000000, /* VGT_MIN_VTX_INDX */
+ 0x00000000, /* VGT_INDX_OFFSET */
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+
+ 0xc0046900,
+ 0x00000105,
+ 0x00000000, /* CB_BLEND_RED */
+ 0x00000000, /* CB_BLEND_GREEN */
+ 0x00000000, /* CB_BLEND_BLUE */
+ 0x00000000, /* CB_BLEND_ALPHA */
+
+ 0xc0016900,
+ 0x000001e0,
+ 0x00000000, /* CB_BLEND0_CONTROL */
+
+ 0xc00e6900,
+ 0x00000200,
+ 0x00000000, /* DB_DEPTH_CONTROL */
+ 0x00000000, /* DB_EQAA */
+ 0x00cc0010, /* CB_COLOR_CONTROL */
+ 0x00000210, /* DB_SHADER_CONTROL */
+ 0x00010000, /* PA_CL_CLIP_CNTL */
+ 0x00000004, /* PA_SU_SC_MODE_CNTL */
+ 0x00000100, /* PA_CL_VTE_CNTL */
+ 0x00000000, /* PA_CL_VS_OUT_CNTL */
+ 0x00000000, /* PA_CL_NANINF_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
+ 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
+ 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
+ 0x00000000, /* */
+ 0x00000000, /* */
+
+ 0xc0116900,
+ 0x00000280,
+ 0x00000000, /* PA_SU_POINT_SIZE */
+ 0x00000000, /* PA_SU_POINT_MINMAX */
+ 0x00000008, /* PA_SU_LINE_CNTL */
+ 0x00000000, /* PA_SC_LINE_STIPPLE */
+ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+ 0x00000000, /* VGT_HOS_CNTL */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000, /* VGT_GS_MODE */
+
+ 0xc0026900,
+ 0x00000292,
+ 0x00000000, /* PA_SC_MODE_CNTL_0 */
+ 0x00000000, /* PA_SC_MODE_CNTL_1 */
+
+ 0xc0016900,
+ 0x000002a1,
+ 0x00000000, /* VGT_PRIMITIVEID_EN */
+
+ 0xc0016900,
+ 0x000002a5,
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
+
+ 0xc0026900,
+ 0x000002a8,
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+ 0x00000000,
+
+ 0xc0026900,
+ 0x000002ad,
+ 0x00000000, /* VGT_REUSE_OFF */
+ 0x00000000,
+
+ 0xc0016900,
+ 0x000002d5,
+ 0x00000000, /* VGT_SHADER_STAGES_EN */
+
+ 0xc0016900,
+ 0x000002dc,
+ 0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+ 0xc0066900,
+ 0x000002de,
+ 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+
+ 0xc0026900,
+ 0x000002e5,
+ 0x00000000, /* VGT_STRMOUT_CONFIG */
+ 0x00000000,
+
+ 0xc01b6900,
+ 0x000002f5,
+ 0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
+ 0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
+ 0x00000000, /* PA_SC_LINE_CNTL */
+ 0x00000000, /* PA_SC_AA_CONFIG */
+ 0x00000005, /* PA_SU_VTX_CNTL */
+ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
+ 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
+ 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
+ 0xffffffff,
+
+ 0xc0026900,
+ 0x00000316,
+ 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ 0x00000010, /* */
+};
+
+static const u32 si_default_size = ARRAY_SIZE(si_default_state);
#endif
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index 56ae38389db0..462fae73eae9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -222,13 +222,11 @@ void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
/* Set the physical address of the buffer in memory */
paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
- paddr -= PHYS_OFFSET;
DRM_DEBUG_DRIVER("Setting buffer #0 address to %pad\n", &paddr);
regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR0_REG, paddr);
if (fb->format->num_planes > 1) {
paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 2 : 1);
- paddr -= PHYS_OFFSET;
DRM_DEBUG_DRIVER("Setting buffer #1 address to %pad\n", &paddr);
regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR1_REG,
paddr);
@@ -236,7 +234,6 @@ void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
if (fb->format->num_planes > 2) {
paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 1 : 2);
- paddr -= PHYS_OFFSET;
DRM_DEBUG_DRIVER("Setting buffer #2 address to %pad\n", &paddr);
regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR2_REG,
paddr);
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 1eef1849cf54..061be9a6619d 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -2,6 +2,9 @@
config DRM_VC4
tristate "Broadcom VC4 Graphics"
depends on ARCH_BCM || ARCH_BCM2835 || COMPILE_TEST
+ # Make sure not 'y' when RASPBERRYPI_FIRMWARE is 'm'. This can only
+ # happen when COMPILE_TEST=y, hence the added !RASPBERRYPI_FIRMWARE.
+ depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
depends on DRM
depends on SND && SND_SOC
depends on COMMON_CLK
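
The added depends on line is the stock Kconfig idiom for "allow building without the firmware driver only under COMPILE_TEST, and never built-in when the dependency is a module". Kconfig tristates behave like a three-valued logic: n=0, m=1, y=2, with && as min, || as max and ! as (2 - x), and a symbol cannot be set higher than its dependency expression evaluates to. A small model of why RASPBERRYPI_FIRMWARE=m caps DRM_VC4 at m; the operator semantics are my reading of the kconfig language, so treat this as a sketch:

#include <stdio.h>

enum tri { N = 0, M = 1, Y = 2 };

static enum tri t_and(enum tri a, enum tri b) { return a < b ? a : b; }
static enum tri t_or(enum tri a, enum tri b)  { return a > b ? a : b; }
static enum tri t_not(enum tri a)             { return (enum tri)(Y - a); }

int main(void)
{
	enum tri compile_test = Y;	/* the case the comment worries about */

	for (int fw = N; fw <= Y; fw++) {
		/* RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE) */
		enum tri limit = t_or((enum tri)fw,
				      t_and(compile_test, t_not((enum tri)fw)));
		printf("FIRMWARE=%d caps DRM_VC4 at %d\n", fw, limit);
	}
	return 0;	/* FIRMWARE=m yields a cap of m, ruling out VC4=y */
}
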
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 752f921735c6..98308a17e4ed 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -846,7 +846,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
unsigned long phy_clock;
int ret;
- ret = pm_runtime_get_sync(dev);
+ ret = pm_runtime_resume_and_get(dev);
if (ret) {
DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
return;
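
The motivation for the switch above is the well-known pm_runtime_get_sync() pitfall: it bumps the device usage count even when the resume fails, so every error path needs a matching put. pm_runtime_resume_and_get(), as I recall its contract, gives the reference back on failure (via pm_runtime_put_noidle()) and returns 0 on success, so the simple "if (ret) return" above stays balanced. A toy model of the difference in counting behaviour; none of this is the real PM core, just the shape of it:

#include <stdio.h>

static int usage_count;
static int resume_ok;	/* left at 0 to simulate a failing resume */

static int get_sync_model(void)
{
	usage_count++;			/* bumped even on failure */
	return resume_ok ? 0 : -5;	/* -EIO stand-in */
}

static int resume_and_get_model(void)
{
	int ret = get_sync_model();

	if (ret < 0)
		usage_count--;		/* failure path gives the ref back */
	return ret;
}

int main(void)
{
	get_sync_model();
	printf("get_sync after failure: count=%d (caller must put)\n", usage_count);

	usage_count = 0;
	resume_and_get_model();
	printf("resume_and_get after failure: count=%d (balanced)\n", usage_count);
	return 0;
}
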
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 408ede1f967f..eb94433067ba 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -46,6 +46,21 @@ vmw_buffer_object(struct ttm_buffer_object *bo)
return container_of(bo, struct vmw_buffer_object, base);
}
+/**
+ * bo_is_vmw - check if the buffer object is a &vmw_buffer_object
+ * @bo: ttm buffer object to be checked
+ *
+ * Uses destroy function associated with the object to determine if this is
+ * a &vmw_buffer_object.
+ *
+ * Returns:
+ * true if the object is of &vmw_buffer_object type, false if not.
+ */
+static bool bo_is_vmw(struct ttm_buffer_object *bo)
+{
+ return bo->destroy == &vmw_bo_bo_free ||
+ bo->destroy == &vmw_gem_destroy;
+}
/**
* vmw_bo_pin_in_placement - Validate a buffer to placement.
@@ -615,8 +630,9 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo);
- if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
- ret != -EBUSY)) {
+ if (unlikely(ret != 0)) {
+ if (ret == -ERESTARTSYS || ret == -EBUSY)
+ return -EBUSY;
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
(unsigned int) arg->handle);
return ret;
@@ -805,7 +821,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
/* Is @bo embedded in a struct vmw_buffer_object? */
- if (vmw_bo_is_vmw_bo(bo))
+ if (!bo_is_vmw(bo))
return;
/* Kill any cached kernel maps before swapout */
@@ -829,7 +845,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct vmw_buffer_object *vbo;
/* Make sure @bo is embedded in a struct vmw_buffer_object? */
- if (vmw_bo_is_vmw_bo(bo))
+ if (!bo_is_vmw(bo))
return;
vbo = container_of(bo, struct vmw_buffer_object, base);
@@ -850,22 +866,3 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo);
}
-
-/**
- * vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object
- * @bo: buffer object to be checked
- *
- * Uses destroy function associated with the object to determine if this is
- * a &vmw_buffer_object.
- *
- * Returns:
- * true if the object is of &vmw_buffer_object type, false if not.
- */
-bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
-{
- if (bo->destroy == &vmw_bo_bo_free ||
- bo->destroy == &vmw_gem_destroy)
- return true;
-
- return false;
-}
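
bo_is_vmw() above is the kernel's usual poor-man's RTTI: every vmw buffer object is created with one of two known destroy callbacks, so comparing that function pointer tells the caller whether the container_of() downcast is safe. A standalone sketch of the pattern; the identifiers are generic, not vmwgfx's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_obj {
	void (*destroy)(struct base_obj *obj);
};

struct derived_obj {
	int extra;
	struct base_obj base;	/* embedded, deliberately not first */
};

static void derived_destroy(struct base_obj *obj) { (void)obj; }

static int is_derived(const struct base_obj *obj)
{
	/* identify the subclass by its known destroy callback */
	return obj->destroy == derived_destroy;
}

int main(void)
{
	struct derived_obj d = {
		.extra = 42,
		.base  = { .destroy = derived_destroy },
	};
	struct base_obj *b = &d.base;

	if (is_derived(b))
		printf("extra = %d\n",
		       container_of(b, struct derived_obj, base)->extra);
	return 0;
}
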
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index ffd975c14136..01a5b47e95f9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -994,13 +994,10 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
goto out_no_fman;
}
- drm_vma_offset_manager_init(&dev_priv->vma_manager,
- DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE);
ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
dev_priv->drm.dev,
dev_priv->drm.anon_inode->i_mapping,
- &dev_priv->vma_manager,
+ dev_priv->drm.vma_offset_manager,
dev_priv->map_mode == vmw_dma_alloc_coherent,
false);
if (unlikely(ret != 0)) {
@@ -1170,7 +1167,6 @@ static void vmw_driver_unload(struct drm_device *dev)
vmw_devcaps_destroy(dev_priv);
vmw_vram_manager_fini(dev_priv);
ttm_device_fini(&dev_priv->bdev);
- drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -1411,7 +1407,7 @@ vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
- &dev_priv->vma_manager);
+ dev_priv->drm.vma_offset_manager);
}
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 00e8e27e4884..ace7ca150b03 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -683,6 +683,9 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
+ if (base->shareable && res && res->backup)
+ drm_gem_object_put(&res->backup->base.base);
+
*p_base = NULL;
vmw_resource_unreference(&res);
}
@@ -857,6 +860,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
vmw_bo_reference(res->backup);
+ drm_gem_object_get(&res->backup->base.base);
}
tmp = vmw_resource_reference(&srf->res);
@@ -1513,7 +1517,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
&res->backup);
if (ret == 0)
vmw_bo_reference(res->backup);
-
}
if (unlikely(ret != 0)) {
@@ -1561,6 +1564,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
rep->buffer_size = res->backup->base.base.size;
rep->buffer_handle = backup_handle;
+ if (user_srf->prime.base.shareable)
+ drm_gem_object_get(&res->backup->base.base);
} else {
rep->buffer_map_handle = 0;
rep->buffer_size = 0;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 27f969b3dc07..e9e2db68b9fb 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -179,6 +179,12 @@ struct imx_i2c_hwdata {
unsigned int ndivs;
unsigned int i2sr_clr_opcode;
unsigned int i2cr_ien_opcode;
+ /*
+ * Errata ERR007805 or e7805:
+ * I2C: When the I2C clock speed is configured for 400 kHz,
+ * the SCL low period violates the I2C spec of 1.3 uS min.
+ */
+ bool has_err007805;
};
struct imx_i2c_dma {
@@ -240,6 +246,16 @@ static const struct imx_i2c_hwdata imx21_i2c_hwdata = {
};
+static const struct imx_i2c_hwdata imx6_i2c_hwdata = {
+ .devtype = IMX21_I2C,
+ .regshift = IMX_I2C_REGSHIFT,
+ .clk_div = imx_i2c_clk_div,
+ .ndivs = ARRAY_SIZE(imx_i2c_clk_div),
+ .i2sr_clr_opcode = I2SR_CLR_OPCODE_W0C,
+ .i2cr_ien_opcode = I2CR_IEN_OPCODE_1,
+ .has_err007805 = true,
+};
+
static struct imx_i2c_hwdata vf610_i2c_hwdata = {
.devtype = VF610_I2C,
.regshift = VF610_I2C_REGSHIFT,
@@ -266,6 +282,16 @@ MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
static const struct of_device_id i2c_imx_dt_ids[] = {
{ .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, },
{ .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, },
+ { .compatible = "fsl,imx6q-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx6sl-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx8mp-i2c", .data = &imx6_i2c_hwdata, },
+ { .compatible = "fsl,imx8mq-i2c", .data = &imx6_i2c_hwdata, },
{ .compatible = "fsl,vf610-i2c", .data = &vf610_i2c_hwdata, },
{ /* sentinel */ }
};
@@ -551,6 +577,13 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
unsigned int div;
int i;
+ if (i2c_imx->hwdata->has_err007805 && i2c_imx->bitrate > 384000) {
+ dev_dbg(&i2c_imx->adapter.dev,
+ "SoC errata ERR007805 or e7805 applies, bus frequency limited from %d Hz to 384000 Hz.\n",
+ i2c_imx->bitrate);
+ i2c_imx->bitrate = 384000;
+ }
+
/* Divider value calculation */
if (i2c_imx->cur_clk == i2c_clk_rate)
return;
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index f4820fd3dc13..c0364314877e 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -145,8 +145,8 @@
#define ISMT_SPGT_SPD_MASK 0xc0000000 /* SMBus Speed mask */
#define ISMT_SPGT_SPD_80K 0x00 /* 80 kHz */
#define ISMT_SPGT_SPD_100K (0x1 << 30) /* 100 kHz */
-#define ISMT_SPGT_SPD_400K (0x2 << 30) /* 400 kHz */
-#define ISMT_SPGT_SPD_1M (0x3 << 30) /* 1 MHz */
+#define ISMT_SPGT_SPD_400K (0x2U << 30) /* 400 kHz */
+#define ISMT_SPGT_SPD_1M (0x3U << 30) /* 1 MHz */
/* MSI Control Register (MSICTL) bit definitions */
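
The U suffixes above fix a subtle C point: 0x3 is a signed int, and left-shifting set bits into the sign bit of a signed int, which is exactly what 0x3 << 30 does on a 32-bit int, is undefined behaviour, even though the result is only ever used as a register field mask. A minimal illustration:

#include <stdio.h>

int main(void)
{
	/*
	 * unsigned int bad = 0x3 << 30;
	 *
	 * The line above is undefined behaviour: the shift happens in
	 * signed int and overflows into the sign bit (UBSan flags it).
	 */
	unsigned int spd_1m = 0x3U << 30;	/* well-defined: 0xc0000000 */

	printf("SPD_1M = %#x\n", spd_1m);
	return 0;
}
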
diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
index 7728c8460dc0..9028ffb58cc0 100644
--- a/drivers/i2c/busses/i2c-pasemi-core.c
+++ b/drivers/i2c/busses/i2c-pasemi-core.c
@@ -137,6 +137,12 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter,
TXFIFO_WR(smbus, msg->buf[msg->len-1] |
(stop ? MTXFIFO_STOP : 0));
+
+ if (stop) {
+ err = pasemi_smb_waitready(smbus);
+ if (err)
+ goto reset_out;
+ }
}
return 0;
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index fc1dcc19f2a1..5b920f0fc7dd 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -843,10 +843,8 @@ static int geni_i2c_probe(struct platform_device *pdev)
/* FIFO is disabled, so we can only use GPI DMA */
gi2c->gpi_mode = true;
ret = setup_gpi_dma(gi2c);
- if (ret) {
- dev_err(dev, "Failed to setup GPI DMA mode:%d ret\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n");
dev_dbg(dev, "Using GPI DMA mode for I2C\n");
} else {
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index cf5d049342ea..ab0adaa130da 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -557,7 +557,7 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
.addr = umsg.addr,
.flags = umsg.flags,
.len = umsg.len,
- .buf = compat_ptr(umsg.buf)
+ .buf = (__force __u8 *)compat_ptr(umsg.buf),
};
}
@@ -668,16 +668,21 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
i2c_dev->dev.class = i2c_dev_class;
i2c_dev->dev.parent = &adap->dev;
i2c_dev->dev.release = i2cdev_dev_release;
- dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
+
+ res = dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
+ if (res)
+ goto err_put_i2c_dev;
res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev);
- if (res) {
- put_i2c_dev(i2c_dev, false);
- return res;
- }
+ if (res)
+ goto err_put_i2c_dev;
pr_debug("adapter [%s] registered as minor %d\n", adap->name, adap->nr);
return 0;
+
+err_put_i2c_dev:
+ put_i2c_dev(i2c_dev, false);
+ return res;
}
static int i2cdev_detach_adapter(struct device *dev, void *dummy)
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index b7640cfe0020..47551ab73ca8 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -69,7 +69,12 @@ static unsigned int preferred_states_mask;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static unsigned long auto_demotion_disable_flags;
-static bool disable_promotion_to_c1e;
+
+static enum {
+ C1E_PROMOTION_PRESERVE,
+ C1E_PROMOTION_ENABLE,
+ C1E_PROMOTION_DISABLE
+} c1e_promotion = C1E_PROMOTION_PRESERVE;
struct idle_cpu {
struct cpuidle_state *state_table;
@@ -1398,8 +1403,6 @@ static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
#endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
-static void c1e_promotion_enable(void);
-
/**
* ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
*
@@ -1578,17 +1581,14 @@ static void __init spr_idle_state_table_update(void)
unsigned long long msr;
/* Check if user prefers C1E over C1. */
- if (preferred_states_mask & BIT(2)) {
- if (preferred_states_mask & BIT(1))
- /* Both can't be enabled, stick to the defaults. */
- return;
-
+ if ((preferred_states_mask & BIT(2)) &&
+ !(preferred_states_mask & BIT(1))) {
+ /* Disable C1 and enable C1E. */
spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE;
spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE;
/* Enable C1E using the "C1E promotion" bit. */
- c1e_promotion_enable();
- disable_promotion_to_c1e = false;
+ c1e_promotion = C1E_PROMOTION_ENABLE;
}
/*
@@ -1754,7 +1754,9 @@ static int intel_idle_cpu_init(unsigned int cpu)
if (auto_demotion_disable_flags)
auto_demotion_disable();
- if (disable_promotion_to_c1e)
+ if (c1e_promotion == C1E_PROMOTION_ENABLE)
+ c1e_promotion_enable();
+ else if (c1e_promotion == C1E_PROMOTION_DISABLE)
c1e_promotion_disable();
return 0;
@@ -1833,7 +1835,8 @@ static int __init intel_idle_init(void)
if (icpu) {
cpuidle_state_table = icpu->state_table;
auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
- disable_promotion_to_c1e = icpu->disable_promotion_to_c1e;
+ if (icpu->disable_promotion_to_c1e)
+ c1e_promotion = C1E_PROMOTION_DISABLE;
if (icpu->use_acpi || force_use_acpi)
intel_idle_acpi_cst_extract();
} else if (!intel_idle_acpi_cst_extract()) {
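
The bool becomes a tri-state so the driver can also leave the firmware's C1E setting untouched; only an explicit ENABLE or DISABLE touches the hardware. A minimal sketch of the dispatch (the print statements stand in for the MSR writes in the driver):

#include <stdio.h>

enum c1e_promotion {
        C1E_PROMOTION_PRESERVE, /* leave the firmware setting alone */
        C1E_PROMOTION_ENABLE,
        C1E_PROMOTION_DISABLE,
};

static void apply_c1e_policy(enum c1e_promotion policy)
{
        switch (policy) {
        case C1E_PROMOTION_ENABLE:
                printf("set C1E promotion bit\n");
                break;
        case C1E_PROMOTION_DISABLE:
                printf("clear C1E promotion bit\n");
                break;
        case C1E_PROMOTION_PRESERVE:
                /* Neither write happens: the BIOS default survives. */
                break;
        }
}

int main(void)
{
        apply_c1e_policy(C1E_PROMOTION_PRESERVE);
        apply_c1e_policy(C1E_PROMOTION_ENABLE);
        return 0;
}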
diff --git a/drivers/iio/adc/ad7280a.c b/drivers/iio/adc/ad7280a.c
index ef9d27759961..ec9acbf12b9a 100644
--- a/drivers/iio/adc/ad7280a.c
+++ b/drivers/iio/adc/ad7280a.c
@@ -745,7 +745,7 @@ static int ad7280a_write_thresh(struct iio_dev *indio_dev,
case IIO_EV_DIR_RISING:
addr = AD7280A_CELL_OVERVOLTAGE_REG;
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, addr,
- 1, val);
+ 1, value);
if (ret)
break;
st->cell_threshhigh = value;
@@ -753,7 +753,7 @@ static int ad7280a_write_thresh(struct iio_dev *indio_dev,
case IIO_EV_DIR_FALLING:
addr = AD7280A_CELL_UNDERVOLTAGE_REG;
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, addr,
- 1, val);
+ 1, value);
if (ret)
break;
st->cell_threshlow = value;
@@ -770,18 +770,18 @@ static int ad7280a_write_thresh(struct iio_dev *indio_dev,
case IIO_EV_DIR_RISING:
addr = AD7280A_AUX_ADC_OVERVOLTAGE_REG;
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, addr,
- 1, val);
+ 1, value);
if (ret)
break;
- st->aux_threshhigh = val;
+ st->aux_threshhigh = value;
break;
case IIO_EV_DIR_FALLING:
addr = AD7280A_AUX_ADC_UNDERVOLTAGE_REG;
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, addr,
- 1, val);
+ 1, value);
if (ret)
break;
- st->aux_threshlow = val;
+ st->aux_threshlow = value;
break;
default:
ret = -EINVAL;
diff --git a/drivers/iio/chemical/scd4x.c b/drivers/iio/chemical/scd4x.c
index 20d4e7584e92..37143b5526ee 100644
--- a/drivers/iio/chemical/scd4x.c
+++ b/drivers/iio/chemical/scd4x.c
@@ -471,12 +471,15 @@ static ssize_t calibration_forced_value_store(struct device *dev,
ret = scd4x_write_and_fetch(state, CMD_FRC, arg, &val, sizeof(val));
mutex_unlock(&state->lock);
+ if (ret)
+ return ret;
+
if (val == 0xff) {
dev_err(dev, "forced calibration has failed");
return -EINVAL;
}
- return ret ?: len;
+ return len;
}
static IIO_DEVICE_ATTR_RW(calibration_auto_enable, 0);
diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c
index 97f13c0b9631..d5ea1a1be122 100644
--- a/drivers/iio/dac/ad3552r.c
+++ b/drivers/iio/dac/ad3552r.c
@@ -656,7 +656,7 @@ static int ad3552r_reset(struct ad3552r_desc *dac)
{
struct reg_addr_pool addr;
int ret;
- u16 val;
+ int val;
dac->gpio_reset = devm_gpiod_get_optional(&dac->spi->dev, "reset",
GPIOD_OUT_LOW);
@@ -809,10 +809,10 @@ static int ad3552r_configure_custom_gain(struct ad3552r_desc *dac,
gain_child = fwnode_get_named_child_node(child,
"custom-output-range-config");
- if (IS_ERR(gain_child)) {
+ if (!gain_child) {
dev_err(dev,
"mandatory custom-output-range-config property missing\n");
- return PTR_ERR(gain_child);
+ return -EINVAL;
}
dac->ch_data[ch].range_override = 1;
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 14cfabacbea5..fdf824041497 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -178,7 +178,7 @@ static int ad5446_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- *val = st->cached_val;
+ *val = st->cached_val >> chan->scan_type.shift;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = st->vref_mv;
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index a424b7220b61..4434c1b2a322 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -522,7 +522,7 @@ static int ad5592r_alloc_channels(struct iio_dev *iio_dev)
if (!ret)
st->channel_modes[reg] = tmp;
- fwnode_property_read_u32(child, "adi,off-state", &tmp);
+ ret = fwnode_property_read_u32(child, "adi,off-state", &tmp);
if (!ret)
st->channel_offstate[reg] = tmp;
}
diff --git a/drivers/iio/dac/ltc2688.c b/drivers/iio/dac/ltc2688.c
index e41861d29767..2f9c384885f4 100644
--- a/drivers/iio/dac/ltc2688.c
+++ b/drivers/iio/dac/ltc2688.c
@@ -298,7 +298,7 @@ static int ltc2688_read_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
- *val = 16;
+ *val2 = 16;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_CALIBBIAS:
ret = regmap_read(st->regmap,
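
The one-character fix matters because with IIO_VAL_FRACTIONAL_LOG2 userspace computes scale = val / 2^val2, so the 16-bit resolution belongs in *val2, not *val. Worked arithmetic (the 4096 mV span is an assumed illustrative figure, not taken from the patch):

#include <stdio.h>

int main(void)
{
        int val = 4096;         /* numerator, e.g. span in mV (assumed) */
        int val2 = 16;          /* 16-bit DAC -> denominator 2^16 */

        printf("scale = %d / 2^%d = %.6f mV/LSB\n",
               val, val2, (double)val / (1 << val2));   /* 0.062500 */
        return 0;
}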
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index 4a3b8d875518..0b775f943db3 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -19,6 +19,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
enum chip_id {
@@ -311,6 +312,7 @@ static int dac5571_probe(struct i2c_client *client,
const struct dac5571_spec *spec;
struct dac5571_data *data;
struct iio_dev *indio_dev;
+ enum chip_id chip_id;
int ret, i;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
@@ -326,7 +328,13 @@ static int dac5571_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = dac5571_channels;
- spec = &dac5571_spec[id->driver_data];
+ if (dev_fwnode(dev))
+ chip_id = (uintptr_t)device_get_match_data(dev);
+ else
+ chip_id = id->driver_data;
+
+ spec = &dac5571_spec[chip_id];
+
indio_dev->num_channels = spec->num_channels;
data->spec = spec;
@@ -385,15 +393,15 @@ static int dac5571_remove(struct i2c_client *i2c)
}
static const struct of_device_id dac5571_of_id[] = {
- {.compatible = "ti,dac5571"},
- {.compatible = "ti,dac6571"},
- {.compatible = "ti,dac7571"},
- {.compatible = "ti,dac5574"},
- {.compatible = "ti,dac6574"},
- {.compatible = "ti,dac7574"},
- {.compatible = "ti,dac5573"},
- {.compatible = "ti,dac6573"},
- {.compatible = "ti,dac7573"},
+ {.compatible = "ti,dac5571", .data = (void *)single_8bit},
+ {.compatible = "ti,dac6571", .data = (void *)single_10bit},
+ {.compatible = "ti,dac7571", .data = (void *)single_12bit},
+ {.compatible = "ti,dac5574", .data = (void *)quad_8bit},
+ {.compatible = "ti,dac6574", .data = (void *)quad_10bit},
+ {.compatible = "ti,dac7574", .data = (void *)quad_12bit},
+ {.compatible = "ti,dac5573", .data = (void *)quad_8bit},
+ {.compatible = "ti,dac6573", .data = (void *)quad_10bit},
+ {.compatible = "ti,dac7573", .data = (void *)quad_12bit},
{}
};
MODULE_DEVICE_TABLE(of, dac5571_of_id);
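
The table change feeds device_get_match_data() above: the chip_id enum is smuggled through the of_device_id .data void pointer and recovered with a uintptr_t cast. A standalone sketch of that round trip (struct of_entry is a stand-in for struct of_device_id):

#include <stdint.h>
#include <stdio.h>

enum chip_id { single_8bit, single_10bit, quad_12bit };

struct of_entry {
        const char *compatible;
        const void *data;
};

static const struct of_entry table[] = {
        { "ti,dac5571", (void *)(uintptr_t)single_8bit },
        { "ti,dac6571", (void *)(uintptr_t)single_10bit },
        { "ti,dac7574", (void *)(uintptr_t)quad_12bit },
};

int main(void)
{
        /* Recover the enum exactly as the probe path does. */
        enum chip_id id = (enum chip_id)(uintptr_t)table[2].data;

        printf("matched %s -> chip_id %d\n", table[2].compatible, id);
        return 0;
}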
diff --git a/drivers/iio/filter/Kconfig b/drivers/iio/filter/Kconfig
index 3ae35817ad82..a85b345ea14e 100644
--- a/drivers/iio/filter/Kconfig
+++ b/drivers/iio/filter/Kconfig
@@ -8,6 +8,7 @@ menu "Filters"
config ADMV8818
tristate "Analog Devices ADMV8818 High-Pass and Low-Pass Filter"
depends on SPI && COMMON_CLK && 64BIT
+ select REGMAP_SPI
help
Say yes here to build support for Analog Devices ADMV8818
2 GHz to 18 GHz, Digitally Tunable, High-Pass and Low-Pass Filter.
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
index 824b5124a5f5..01336105792e 100644
--- a/drivers/iio/imu/bmi160/bmi160_core.c
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -730,7 +730,7 @@ static int bmi160_chip_init(struct bmi160_data *data, bool use_spi)
ret = regmap_write(data->regmap, BMI160_REG_CMD, BMI160_CMD_SOFTRESET);
if (ret)
- return ret;
+ goto disable_regulator;
usleep_range(BMI160_SOFTRESET_USLEEP, BMI160_SOFTRESET_USLEEP + 1);
@@ -741,29 +741,37 @@ static int bmi160_chip_init(struct bmi160_data *data, bool use_spi)
if (use_spi) {
ret = regmap_read(data->regmap, BMI160_REG_DUMMY, &val);
if (ret)
- return ret;
+ goto disable_regulator;
}
ret = regmap_read(data->regmap, BMI160_REG_CHIP_ID, &val);
if (ret) {
dev_err(dev, "Error reading chip id\n");
- return ret;
+ goto disable_regulator;
}
if (val != BMI160_CHIP_ID_VAL) {
dev_err(dev, "Wrong chip id, got %x expected %x\n",
val, BMI160_CHIP_ID_VAL);
- return -ENODEV;
+ ret = -ENODEV;
+ goto disable_regulator;
}
ret = bmi160_set_mode(data, BMI160_ACCEL, true);
if (ret)
- return ret;
+ goto disable_regulator;
ret = bmi160_set_mode(data, BMI160_GYRO, true);
if (ret)
- return ret;
+ goto disable_accel;
return 0;
+
+disable_accel:
+ bmi160_set_mode(data, BMI160_ACCEL, false);
+
+disable_regulator:
+ regulator_bulk_disable(ARRAY_SIZE(data->supplies), data->supplies);
+ return ret;
}
static int bmi160_data_rdy_trigger_set_state(struct iio_trigger *trig,
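
The hunk converts early returns into the usual goto unwind ladder so the regulators enabled earlier in the function are switched back off on every failure path. A generic, self-contained sketch of the idiom (all helpers are stubs; enable_gyro() is forced to fail to exercise the unwind):

#include <stdio.h>

static int enable_regulators(void) { return 0; }
static void disable_regulators(void) { printf("regulators off\n"); }
static int enable_accel(void) { return 0; }
static void disable_accel(void) { printf("accel off\n"); }
static int enable_gyro(void) { return -1; }     /* force a failure */

static int chip_init(void)
{
        int ret = enable_regulators();

        if (ret)
                return ret;

        ret = enable_accel();
        if (ret)
                goto disable_regulator;

        ret = enable_gyro();
        if (ret)
                goto disable_accel;

        return 0;

disable_accel:
        /* Undo committed steps in reverse order. */
        disable_accel();
disable_regulator:
        disable_regulators();
        return ret;
}

int main(void)
{
        printf("chip_init -> %d\n", chip_init());
        return 0;
}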
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
index 33d9afb1ba91..d4a692b838d0 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
@@ -18,12 +18,15 @@ static int inv_icm42600_i2c_bus_setup(struct inv_icm42600_state *st)
unsigned int mask, val;
int ret;
- /* setup interface registers */
- ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG6,
- INV_ICM42600_INTF_CONFIG6_MASK,
- INV_ICM42600_INTF_CONFIG6_I3C_EN);
- if (ret)
- return ret;
+ /*
+ * Setup interface registers.
+ * This register write to REG_INTF_CONFIG6 enables a spike filter that
+ * impacts the line and can prevent the I2C ACK from being seen by the
+ * controller, so we don't test the return value.
+ */
+ regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG6,
+ INV_ICM42600_INTF_CONFIG6_MASK,
+ INV_ICM42600_INTF_CONFIG6_I3C_EN);
ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG4,
INV_ICM42600_INTF_CONFIG4_I3C_BUS_ONLY, 0);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 088f748b683e..2432e697150c 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -416,6 +416,7 @@ static int ak8975_power_on(const struct ak8975_data *data)
if (ret) {
dev_warn(&data->client->dev,
"Failed to enable specified Vid supply\n");
+ regulator_disable(data->vdd);
return ret;
}
diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c
index 0d9bbbb50cb4..70c37f664f6d 100644
--- a/drivers/iio/proximity/sx9324.c
+++ b/drivers/iio/proximity/sx9324.c
@@ -70,13 +70,17 @@
#define SX9324_REG_AFE_PH2 0x2a
#define SX9324_REG_AFE_PH3 0x2b
#define SX9324_REG_AFE_CTRL8 0x2c
-#define SX9324_REG_AFE_CTRL8_RESFILTN_4KOHM 0x02
+#define SX9324_REG_AFE_CTRL8_RESERVED 0x10
+#define SX9324_REG_AFE_CTRL8_RESFILTIN_4KOHM 0x02
#define SX9324_REG_AFE_CTRL9 0x2d
#define SX9324_REG_AFE_CTRL9_AGAIN_1 0x08
#define SX9324_REG_PROX_CTRL0 0x30
#define SX9324_REG_PROX_CTRL0_GAIN_MASK GENMASK(5, 3)
-#define SX9324_REG_PROX_CTRL0_GAIN_1 0x80
+#define SX9324_REG_PROX_CTRL0_GAIN_SHIFT 3
+#define SX9324_REG_PROX_CTRL0_GAIN_RSVD 0x0
+#define SX9324_REG_PROX_CTRL0_GAIN_1 0x1
+#define SX9324_REG_PROX_CTRL0_GAIN_8 0x4
#define SX9324_REG_PROX_CTRL0_RAWFILT_MASK GENMASK(2, 0)
#define SX9324_REG_PROX_CTRL0_RAWFILT_1P50 0x01
#define SX9324_REG_PROX_CTRL1 0x31
@@ -379,7 +383,14 @@ static int sx9324_read_gain(struct sx_common_data *data,
if (ret)
return ret;
- *val = 1 << FIELD_GET(SX9324_REG_PROX_CTRL0_GAIN_MASK, regval);
+ regval = FIELD_GET(SX9324_REG_PROX_CTRL0_GAIN_MASK, regval);
+ if (regval)
+ regval--;
+ else if (regval == SX9324_REG_PROX_CTRL0_GAIN_RSVD ||
+ regval > SX9324_REG_PROX_CTRL0_GAIN_8)
+ return -EINVAL;
+
+ *val = 1 << regval;
return IIO_VAL_INT;
}
@@ -725,8 +736,12 @@ static int sx9324_write_gain(struct sx_common_data *data,
unsigned int gain, reg;
int ret;
- gain = ilog2(val);
reg = SX9324_REG_PROX_CTRL0 + chan->channel / 2;
+
+ gain = ilog2(val) + 1;
+ if (val <= 0 || gain > SX9324_REG_PROX_CTRL0_GAIN_8)
+ return -EINVAL;
+
gain = FIELD_PREP(SX9324_REG_PROX_CTRL0_GAIN_MASK, gain);
mutex_lock(&data->mutex);
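
Both hunks agree on the same field encoding: the register stores ilog2(gain) + 1, with 0 reserved, so gains 1/2/4/8 map to field values 1..4. A round-trip sketch of that encoding (illustrative C only; the macro values mirror the defines above):

#include <stdio.h>

#define GAIN_RSVD 0
#define GAIN_8    4     /* highest valid field value: gain 8 -> 3 + 1 */

static int ilog2_u(unsigned int v)
{
        int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

static int encode_gain(unsigned int gain)       /* gain -> field */
{
        int field = ilog2_u(gain) + 1;

        return (gain == 0 || field > GAIN_8) ? -1 : field;
}

static int decode_gain(unsigned int field)      /* field -> gain */
{
        if (field == GAIN_RSVD || field > GAIN_8)
                return -1;
        return 1 << (field - 1);
}

int main(void)
{
        unsigned int g;

        for (g = 1; g <= 8; g <<= 1)    /* 1, 2, 4, 8 round-trip cleanly */
                printf("gain %u -> field %d -> gain %d\n",
                       g, encode_gain(g), decode_gain(encode_gain(g)));
        return 0;
}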
@@ -781,12 +796,15 @@ static const struct sx_common_reg_default sx9324_default_regs[] = {
{ SX9324_REG_AFE_PH2, 0x1a },
{ SX9324_REG_AFE_PH3, 0x16 },
- { SX9324_REG_AFE_CTRL8, SX9324_REG_AFE_CTRL8_RESFILTN_4KOHM },
+ { SX9324_REG_AFE_CTRL8, SX9324_REG_AFE_CTRL8_RESERVED |
+ SX9324_REG_AFE_CTRL8_RESFILTIN_4KOHM },
{ SX9324_REG_AFE_CTRL9, SX9324_REG_AFE_CTRL9_AGAIN_1 },
- { SX9324_REG_PROX_CTRL0, SX9324_REG_PROX_CTRL0_GAIN_1 |
+ { SX9324_REG_PROX_CTRL0,
+ SX9324_REG_PROX_CTRL0_GAIN_1 << SX9324_REG_PROX_CTRL0_GAIN_SHIFT |
SX9324_REG_PROX_CTRL0_RAWFILT_1P50 },
- { SX9324_REG_PROX_CTRL1, SX9324_REG_PROX_CTRL0_GAIN_1 |
+ { SX9324_REG_PROX_CTRL1,
+ SX9324_REG_PROX_CTRL0_GAIN_1 << SX9324_REG_PROX_CTRL0_GAIN_SHIFT |
SX9324_REG_PROX_CTRL0_RAWFILT_1P50 },
{ SX9324_REG_PROX_CTRL2, SX9324_REG_PROX_CTRL2_AVGNEG_THRESH_16K },
{ SX9324_REG_PROX_CTRL3, SX9324_REG_PROX_CTRL3_AVGDEB_2SAMPLES |
diff --git a/drivers/iio/proximity/sx_common.c b/drivers/iio/proximity/sx_common.c
index a7c07316a0a9..8ad814d96b7e 100644
--- a/drivers/iio/proximity/sx_common.c
+++ b/drivers/iio/proximity/sx_common.c
@@ -521,6 +521,7 @@ int sx_common_probe(struct i2c_client *client,
return dev_err_probe(dev, ret, "error reading WHOAMI\n");
ACPI_COMPANION_SET(&indio_dev->dev, ACPI_COMPANION(dev));
+ indio_dev->dev.of_node = client->dev.of_node;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = data->chip_info->iio_channels;
diff --git a/drivers/input/keyboard/cypress-sf.c b/drivers/input/keyboard/cypress-sf.c
index c28996028e80..9a23eed6a4f4 100644
--- a/drivers/input/keyboard/cypress-sf.c
+++ b/drivers/input/keyboard/cypress-sf.c
@@ -61,6 +61,14 @@ static irqreturn_t cypress_sf_irq_handler(int irq, void *devid)
return IRQ_HANDLED;
}
+static void cypress_sf_disable_regulators(void *arg)
+{
+ struct cypress_sf_data *touchkey = arg;
+
+ regulator_bulk_disable(ARRAY_SIZE(touchkey->regulators),
+ touchkey->regulators);
+}
+
static int cypress_sf_probe(struct i2c_client *client)
{
struct cypress_sf_data *touchkey;
@@ -121,6 +129,12 @@ static int cypress_sf_probe(struct i2c_client *client)
return error;
}
+ error = devm_add_action_or_reset(&client->dev,
+ cypress_sf_disable_regulators,
+ touchkey);
+ if (error)
+ return error;
+
touchkey->input_dev = devm_input_allocate_device(&client->dev);
if (!touchkey->input_dev) {
dev_err(&client->dev, "Failed to allocate input device\n");
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 43375b38ee59..8a7ce41b8c56 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -393,7 +393,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
* revision register.
*/
error = pm_runtime_get_sync(dev);
- if (error) {
+ if (error < 0) {
dev_err(dev, "pm_runtime_get_sync() failed\n");
pm_runtime_put_noidle(dev);
return error;
diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
index 12d59c36df53..5f7c0f85fa8e 100644
--- a/drivers/interconnect/qcom/sc7180.c
+++ b/drivers/interconnect/qcom/sc7180.c
@@ -47,7 +47,6 @@ DEFINE_QNODE(qnm_mnoc_sf, SC7180_MASTER_MNOC_SF_MEM_NOC, 1, 32, SC7180_SLAVE_GEM
DEFINE_QNODE(qnm_snoc_gc, SC7180_MASTER_SNOC_GC_MEM_NOC, 1, 8, SC7180_SLAVE_LLCC);
DEFINE_QNODE(qnm_snoc_sf, SC7180_MASTER_SNOC_SF_MEM_NOC, 1, 16, SC7180_SLAVE_LLCC);
DEFINE_QNODE(qxm_gpu, SC7180_MASTER_GFX3D, 2, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
-DEFINE_QNODE(ipa_core_master, SC7180_MASTER_IPA_CORE, 1, 8, SC7180_SLAVE_IPA_CORE);
DEFINE_QNODE(llcc_mc, SC7180_MASTER_LLCC, 2, 4, SC7180_SLAVE_EBI1);
DEFINE_QNODE(qhm_mnoc_cfg, SC7180_MASTER_CNOC_MNOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_MNOC);
DEFINE_QNODE(qxm_camnoc_hf0, SC7180_MASTER_CAMNOC_HF0, 2, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC);
@@ -129,7 +128,6 @@ DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SC7180_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
DEFINE_QNODE(qns_gem_noc_snoc, SC7180_SLAVE_GEM_NOC_SNOC, 1, 8, SC7180_MASTER_GEM_NOC_SNOC);
DEFINE_QNODE(qns_llcc, SC7180_SLAVE_LLCC, 1, 16, SC7180_MASTER_LLCC);
DEFINE_QNODE(srvc_gemnoc, SC7180_SLAVE_SERVICE_GEM_NOC, 1, 4);
-DEFINE_QNODE(ipa_core_slave, SC7180_SLAVE_IPA_CORE, 1, 8);
DEFINE_QNODE(ebi, SC7180_SLAVE_EBI1, 2, 4);
DEFINE_QNODE(qns_mem_noc_hf, SC7180_SLAVE_MNOC_HF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_HF_MEM_NOC);
DEFINE_QNODE(qns_mem_noc_sf, SC7180_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_SF_MEM_NOC);
@@ -160,7 +158,6 @@ DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
-DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aop, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_rt_throttle_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_npu_dma_throttle_cfg, &qhs_npu_dsp_throttle_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_tlmm_1, &qhs_tlmm_2, &qhs_tlmm_3, &qhs_ufs_mem_cfg, &qhs_usb3, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qhm_mnoc_cfg, &qxm_mdp0, &qxm_rot, &qxm_venus0, &qxm_venus_arm9);
DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);
@@ -372,22 +369,6 @@ static struct qcom_icc_desc sc7180_gem_noc = {
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *ipa_virt_bcms[] = {
- &bcm_ip0,
-};
-
-static struct qcom_icc_node *ipa_virt_nodes[] = {
- [MASTER_IPA_CORE] = &ipa_core_master,
- [SLAVE_IPA_CORE] = &ipa_core_slave,
-};
-
-static struct qcom_icc_desc sc7180_ipa_virt = {
- .nodes = ipa_virt_nodes,
- .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
- .bcms = ipa_virt_bcms,
- .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
-};
-
static struct qcom_icc_bcm *mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
@@ -519,8 +500,6 @@ static const struct of_device_id qnoc_of_match[] = {
.data = &sc7180_dc_noc},
{ .compatible = "qcom,sc7180-gem-noc",
.data = &sc7180_gem_noc},
- { .compatible = "qcom,sc7180-ipa-virt",
- .data = &sc7180_ipa_virt},
{ .compatible = "qcom,sc7180-mc-virt",
.data = &sc7180_mc_virt},
{ .compatible = "qcom,sc7180-mmss-noc",
diff --git a/drivers/interconnect/qcom/sdx55.c b/drivers/interconnect/qcom/sdx55.c
index 03d604f84cc5..e3ac25a997b7 100644
--- a/drivers/interconnect/qcom/sdx55.c
+++ b/drivers/interconnect/qcom/sdx55.c
@@ -18,7 +18,6 @@
#include "icc-rpmh.h"
#include "sdx55.h"
-DEFINE_QNODE(ipa_core_master, SDX55_MASTER_IPA_CORE, 1, 8, SDX55_SLAVE_IPA_CORE);
DEFINE_QNODE(llcc_mc, SDX55_MASTER_LLCC, 4, 4, SDX55_SLAVE_EBI_CH0);
DEFINE_QNODE(acm_tcu, SDX55_MASTER_TCU_0, 1, 8, SDX55_SLAVE_LLCC, SDX55_SLAVE_MEM_NOC_SNOC, SDX55_SLAVE_MEM_NOC_PCIE_SNOC);
DEFINE_QNODE(qnm_snoc_gc, SDX55_MASTER_SNOC_GC_MEM_NOC, 1, 8, SDX55_SLAVE_LLCC);
@@ -40,7 +39,6 @@ DEFINE_QNODE(xm_pcie, SDX55_MASTER_PCIE, 1, 8, SDX55_SLAVE_ANOC_SNOC);
DEFINE_QNODE(xm_qdss_etr, SDX55_MASTER_QDSS_ETR, 1, 8, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_AOSS, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
DEFINE_QNODE(xm_sdc1, SDX55_MASTER_SDCC_1, 1, 8, SDX55_SLAVE_AOSS, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP, SDX55_SLAVE_AUDIO);
DEFINE_QNODE(xm_usb3, SDX55_MASTER_USB3, 1, 8, SDX55_SLAVE_ANOC_SNOC);
-DEFINE_QNODE(ipa_core_slave, SDX55_SLAVE_IPA_CORE, 1, 8);
DEFINE_QNODE(ebi, SDX55_SLAVE_EBI_CH0, 1, 4);
DEFINE_QNODE(qns_llcc, SDX55_SLAVE_LLCC, 1, 16, SDX55_SLAVE_EBI_CH0);
DEFINE_QNODE(qns_memnoc_snoc, SDX55_SLAVE_MEM_NOC_SNOC, 1, 8, SDX55_MASTER_MEM_NOC_SNOC);
@@ -82,7 +80,6 @@ DEFINE_QNODE(xs_sys_tcu_cfg, SDX55_SLAVE_TCU, 1, 8);
DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
-DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
DEFINE_QBCM(bcm_pn0, "PN0", false, &qhm_snoc_cfg);
DEFINE_QBCM(bcm_sh3, "SH3", false, &xm_apps_rdwr);
DEFINE_QBCM(bcm_sh4, "SH4", false, &qns_memnoc_snoc, &qns_sys_pcie);
@@ -219,22 +216,6 @@ static const struct qcom_icc_desc sdx55_system_noc = {
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
-static struct qcom_icc_bcm *ipa_virt_bcms[] = {
- &bcm_ip0,
-};
-
-static struct qcom_icc_node *ipa_virt_nodes[] = {
- [MASTER_IPA_CORE] = &ipa_core_master,
- [SLAVE_IPA_CORE] = &ipa_core_slave,
-};
-
-static const struct qcom_icc_desc sdx55_ipa_virt = {
- .nodes = ipa_virt_nodes,
- .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
- .bcms = ipa_virt_bcms,
- .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
-};
-
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sdx55-mc-virt",
.data = &sdx55_mc_virt},
@@ -242,8 +223,6 @@ static const struct of_device_id qnoc_of_match[] = {
.data = &sdx55_mem_noc},
{ .compatible = "qcom,sdx55-system-noc",
.data = &sdx55_system_noc},
- { .compatible = "qcom,sdx55-ipa-virt",
- .data = &sdx55_ipa_virt},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 7c2ca52ca3e4..df5347ea450b 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -771,12 +771,12 @@ static void journal_write_unlocked(struct closure *cl)
bio_reset(bio, ca->bdev, REQ_OP_WRITE |
REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);
- bch_bio_map(bio, w->data);
bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
bio->bi_private = w;
+ bch_bio_map(bio, w->data);
trace_bcache_journal_write(bio, w->data->keys);
bio_list_add(&list, bio);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index fdd0194f84dd..320fcdfef48e 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -685,7 +685,7 @@ static void do_bio_hook(struct search *s,
{
struct bio *bio = &s->bio.bio;
- bio_init_clone(bio->bi_bdev, bio, orig_bio, GFP_NOIO);
+ bio_init_clone(orig_bio->bi_bdev, bio, orig_bio, GFP_NOIO);
/*
* bi_end_io can be set separately somewhere else, e.g. the
* variants in,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index ad2d5faa2ebb..36ae30b73a6e 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -4399,6 +4399,7 @@ try_smaller_buffer:
}
if (ic->internal_hash) {
+ size_t recalc_tags_size;
ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
if (!ic->recalc_wq ) {
ti->error = "Cannot allocate workqueue";
@@ -4412,8 +4413,10 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
- ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
- ic->tag_size, GFP_KERNEL);
+ recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size;
+ if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
+ recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
+ ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL);
if (!ic->recalc_tags) {
ti->error = "Cannot allocate tags for recalculating";
r = -ENOMEM;
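
The new sizing computes (RECALC_SECTORS >> log2_sectors_per_block) * tag_size, then pads the buffer so that writing a full hash digest into the last tag slot cannot overrun when the digest is wider than the tag. Worked arithmetic with assumed numbers (none of these values are the driver's actual configuration):

#include <stdio.h>

int main(void)
{
        unsigned int recalc_sectors = 2048;             /* assumed */
        unsigned int log2_sectors_per_block = 3;        /* 4 KiB blocks */
        size_t tag_size = 4;                            /* bytes per tag */
        size_t digest_size = 20;                        /* e.g. SHA-1 */

        size_t size = (recalc_sectors >> log2_sectors_per_block) * tag_size;

        /* Pad so a full digest written into the last slot fits. */
        if (digest_size > tag_size)
                size += digest_size - tag_size;

        printf("recalc_tags buffer: %zu bytes\n", size);        /* 1040 */
        return 0;
}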
diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c
index 875bca30a0dd..82f2a06153dc 100644
--- a/drivers/md/dm-ps-historical-service-time.c
+++ b/drivers/md/dm-ps-historical-service-time.c
@@ -27,7 +27,6 @@
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/sched/clock.h>
#define DM_MSG_PREFIX "multipath historical-service-time"
@@ -433,7 +432,7 @@ static struct dm_path *hst_select_path(struct path_selector *ps,
{
struct selector *s = ps->context;
struct path_info *pi = NULL, *best = NULL;
- u64 time_now = sched_clock();
+ u64 time_now = ktime_get_ns();
struct dm_path *ret = NULL;
unsigned long flags;
@@ -474,7 +473,7 @@ static int hst_start_io(struct path_selector *ps, struct dm_path *path,
static u64 path_service_time(struct path_info *pi, u64 start_time)
{
- u64 sched_now = ktime_get_ns();
+ u64 now = ktime_get_ns();
/* if a previous disk request has finished after this IO was
* sent to the hardware, pretend the submission happened
@@ -483,11 +482,11 @@ static u64 path_service_time(struct path_info *pi, u64 start_time)
if (time_after64(pi->last_finish, start_time))
start_time = pi->last_finish;
- pi->last_finish = sched_now;
- if (time_before64(sched_now, start_time))
+ pi->last_finish = now;
+ if (time_before64(now, start_time))
return 0;
- return sched_now - start_time;
+ return now - start_time;
}
static int hst_end_io(struct path_selector *ps, struct dm_path *path,
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index c1ca9be4b79e..57daa86c19cf 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -360,16 +360,20 @@ static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno,
return 0;
}
+struct orig_bio_details {
+ unsigned int op;
+ unsigned int nr_sectors;
+};
+
/*
* First phase of BIO mapping for targets with zone append emulation:
 * check all BIOs that change a zone write pointer and change zone
* append operations into regular write operations.
*/
static bool dm_zone_map_bio_begin(struct mapped_device *md,
- struct bio *orig_bio, struct bio *clone)
+ unsigned int zno, struct bio *clone)
{
sector_t zsectors = blk_queue_zone_sectors(md->queue);
- unsigned int zno = bio_zone_no(orig_bio);
unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
/*
@@ -384,7 +388,7 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
WRITE_ONCE(md->zwp_offset[zno], zwp_offset);
}
- switch (bio_op(orig_bio)) {
+ switch (bio_op(clone)) {
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_FINISH:
return true;
@@ -401,9 +405,8 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
* target zone.
*/
clone->bi_opf = REQ_OP_WRITE | REQ_NOMERGE |
- (orig_bio->bi_opf & (~REQ_OP_MASK));
- clone->bi_iter.bi_sector =
- orig_bio->bi_iter.bi_sector + zwp_offset;
+ (clone->bi_opf & (~REQ_OP_MASK));
+ clone->bi_iter.bi_sector += zwp_offset;
break;
default:
DMWARN_LIMIT("Invalid BIO operation");
@@ -423,11 +426,10 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
* data written to a zone. Note that at this point, the remapped clone BIO
* may already have completed, so we do not touch it.
*/
-static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
- struct bio *orig_bio,
+static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int zno,
+ struct orig_bio_details *orig_bio_details,
unsigned int nr_sectors)
{
- unsigned int zno = bio_zone_no(orig_bio);
unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
/* The clone BIO may already have been completed and failed */
@@ -435,7 +437,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
return BLK_STS_IOERR;
/* Update the zone wp offset */
- switch (bio_op(orig_bio)) {
+ switch (orig_bio_details->op) {
case REQ_OP_ZONE_RESET:
WRITE_ONCE(md->zwp_offset[zno], 0);
return BLK_STS_OK;
@@ -452,7 +454,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
* Check that the target did not truncate the write operation
* emulating a zone append.
*/
- if (nr_sectors != bio_sectors(orig_bio)) {
+ if (nr_sectors != orig_bio_details->nr_sectors) {
DMWARN_LIMIT("Truncated write for zone append");
return BLK_STS_IOERR;
}
@@ -488,7 +490,7 @@ static inline void dm_zone_unlock(struct request_queue *q,
bio_clear_flag(clone, BIO_ZONE_WRITE_LOCKED);
}
-static bool dm_need_zone_wp_tracking(struct bio *orig_bio)
+static bool dm_need_zone_wp_tracking(struct bio *bio)
{
/*
* Special processing is not needed for operations that do not need the
@@ -496,15 +498,15 @@ static bool dm_need_zone_wp_tracking(struct bio *orig_bio)
* zones and all operations that do not modify directly a sequential
* zone write pointer.
*/
- if (op_is_flush(orig_bio->bi_opf) && !bio_sectors(orig_bio))
+ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return false;
- switch (bio_op(orig_bio)) {
+ switch (bio_op(bio)) {
case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE:
case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_FINISH:
case REQ_OP_ZONE_APPEND:
- return bio_zone_is_seq(orig_bio);
+ return bio_zone_is_seq(bio);
default:
return false;
}
@@ -519,8 +521,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
struct dm_target *ti = tio->ti;
struct mapped_device *md = io->md;
struct request_queue *q = md->queue;
- struct bio *orig_bio = io->orig_bio;
struct bio *clone = &tio->clone;
+ struct orig_bio_details orig_bio_details;
unsigned int zno;
blk_status_t sts;
int r;
@@ -529,18 +531,21 @@ int dm_zone_map_bio(struct dm_target_io *tio)
* IOs that do not change a zone write pointer do not need
* any additional special processing.
*/
- if (!dm_need_zone_wp_tracking(orig_bio))
+ if (!dm_need_zone_wp_tracking(clone))
return ti->type->map(ti, clone);
/* Lock the target zone */
- zno = bio_zone_no(orig_bio);
+ zno = bio_zone_no(clone);
dm_zone_lock(q, zno, clone);
+ orig_bio_details.nr_sectors = bio_sectors(clone);
+ orig_bio_details.op = bio_op(clone);
+
/*
* Check that the bio and the target zone write pointer offset are
* both valid, and if the bio is a zone append, remap it to a write.
*/
- if (!dm_zone_map_bio_begin(md, orig_bio, clone)) {
+ if (!dm_zone_map_bio_begin(md, zno, clone)) {
dm_zone_unlock(q, zno, clone);
return DM_MAPIO_KILL;
}
@@ -560,7 +565,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
* The target submitted the clone BIO. The target zone will
* be unlocked on completion of the clone.
*/
- sts = dm_zone_map_bio_end(md, orig_bio, *tio->len_ptr);
+ sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
+ *tio->len_ptr);
break;
case DM_MAPIO_REMAPPED:
/*
@@ -568,7 +574,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
* unlock the target zone here as the clone will not be
* submitted.
*/
- sts = dm_zone_map_bio_end(md, orig_bio, *tio->len_ptr);
+ sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
+ *tio->len_ptr);
if (sts != BLK_STS_OK)
dm_zone_unlock(q, zno, clone);
break;
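
The orig_bio_details struct exists because the clone may be remapped to a regular write, or even complete, before the end phase runs, so the original op and size must be captured up front. A sketch of that snapshot-then-validate pattern (all types and values below are stand-ins, not kernel definitions):

#include <stdio.h>

struct bio_stub {
        unsigned int op;
        unsigned int nr_sectors;
};

struct orig_bio_details {
        unsigned int op;
        unsigned int nr_sectors;
};

static void target_map(struct bio_stub *clone)
{
        clone->op = 1;                  /* e.g. zone append remapped to write */
        clone->nr_sectors -= 2;         /* callee truncated the I/O */
}

int main(void)
{
        struct bio_stub clone = { .op = 13, .nr_sectors = 8 };  /* placeholder op */
        struct orig_bio_details od = {
                .op = clone.op,                 /* snapshot first */
                .nr_sectors = clone.nr_sectors,
        };

        target_map(&clone);

        if (clone.nr_sectors != od.nr_sectors)
                printf("truncated write detected (was %u, now %u)\n",
                       od.nr_sectors, clone.nr_sectors);
        return 0;
}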
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3c5fad7c4ee6..82957bd460e8 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1323,8 +1323,7 @@ static void __map_bio(struct bio *clone)
}
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
- struct dm_target *ti, unsigned num_bios,
- unsigned *len)
+ struct dm_target *ti, unsigned num_bios)
{
struct bio *bio;
int try;
@@ -1335,7 +1334,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
if (try)
mutex_lock(&ci->io->md->table_devices_lock);
for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
- bio = alloc_tio(ci, ti, bio_nr, len,
+ bio = alloc_tio(ci, ti, bio_nr, NULL,
try ? GFP_NOIO : GFP_NOWAIT);
if (!bio)
break;
@@ -1363,11 +1362,11 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
break;
case 1:
clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
- dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
__map_bio(clone);
break;
default:
- alloc_multiple_bios(&blist, ci, ti, num_bios, len);
+ /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
+ alloc_multiple_bios(&blist, ci, ti, num_bios);
while ((clone = bio_list_pop(&blist))) {
dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
__map_bio(clone);
@@ -1392,6 +1391,7 @@ static void __send_empty_flush(struct clone_info *ci)
ci->bio = &flush_bio;
ci->sector_count = 0;
+ ci->io->tio.clone.bi_iter.bi_size = 0;
while ((ti = dm_table_get_target(ci->map, target_nr++)))
__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
@@ -1407,14 +1407,10 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
len = min_t(sector_t, ci->sector_count,
max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
- /*
- * dm_accept_partial_bio cannot be used with duplicate bios,
- * so update clone_info cursor before __send_duplicate_bios().
- */
+ __send_duplicate_bios(ci, ti, num_bios, &len);
+
ci->sector += len;
ci->sector_count -= len;
-
- __send_duplicate_bios(ci, ti, num_bios, &len);
}
static bool is_abnormal_io(struct bio *bio)
diff --git a/drivers/media/platform/nxp/Kconfig b/drivers/media/platform/nxp/Kconfig
index 28f2bafc14d2..5afa373e534f 100644
--- a/drivers/media/platform/nxp/Kconfig
+++ b/drivers/media/platform/nxp/Kconfig
@@ -7,6 +7,7 @@ comment "NXP media platform drivers"
config VIDEO_IMX_MIPI_CSIS
tristate "NXP MIPI CSI-2 CSIS receiver found on i.MX7 and i.MX8 models"
depends on ARCH_MXC || COMPILE_TEST
+ depends on VIDEO_DEV
select MEDIA_CONTROLLER
select V4L2_FWNODE
select VIDEO_V4L2_SUBDEV_API
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 4de5e8d2b261..3d3d1062e212 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -892,7 +892,7 @@ static int rga_probe(struct platform_device *pdev)
}
rga->dst_mmu_pages =
(unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
- if (rga->dst_mmu_pages) {
+ if (!rga->dst_mmu_pages) {
ret = -ENOMEM;
goto free_src_pages;
}
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 47029746b89e..0de587b412d4 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -77,16 +77,16 @@ err_mutex_unlock:
}
static const struct si2157_tuner_info si2157_tuners[] = {
- { SI2141, false, 0x60, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE },
- { SI2141, false, 0x61, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE },
- { SI2146, false, 0x11, SI2146_11_FIRMWARE, NULL },
- { SI2147, false, 0x50, SI2147_50_FIRMWARE, NULL },
- { SI2148, true, 0x32, SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE },
- { SI2148, true, 0x33, SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE },
- { SI2157, false, 0x50, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE },
- { SI2158, false, 0x50, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE },
- { SI2158, false, 0x51, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE },
- { SI2177, false, 0x50, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE },
+ { SI2141, 0x60, false, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE },
+ { SI2141, 0x61, false, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE },
+ { SI2146, 0x11, false, SI2146_11_FIRMWARE, NULL },
+ { SI2147, 0x50, false, SI2147_50_FIRMWARE, NULL },
+ { SI2148, 0x32, true, SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE },
+ { SI2148, 0x33, true, SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE },
+ { SI2157, 0x50, false, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE },
+ { SI2158, 0x50, false, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE },
+ { SI2158, 0x51, false, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE },
+ { SI2177, 0x50, false, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE },
};
static int si2157_load_firmware(struct dvb_frontend *fe,
@@ -178,7 +178,7 @@ static int si2157_find_and_load_firmware(struct dvb_frontend *fe)
}
}
- if (!fw_name && !fw_alt_name) {
+ if (required && !fw_name && !fw_alt_name) {
dev_err(&client->dev,
"unknown chip version Si21%d-%c%c%c ROM 0x%02x\n",
part_id, cmd.args[1], cmd.args[3], cmd.args[4], rom_id);
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index c267283b01fd..e749dcb3ddea 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -544,20 +544,27 @@ static int atmel_ebi_probe(struct platform_device *pdev)
smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0);
ebi->smc.regmap = syscon_node_to_regmap(smc_np);
- if (IS_ERR(ebi->smc.regmap))
- return PTR_ERR(ebi->smc.regmap);
+ if (IS_ERR(ebi->smc.regmap)) {
+ ret = PTR_ERR(ebi->smc.regmap);
+ goto put_node;
+ }
ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np);
- if (IS_ERR(ebi->smc.layout))
- return PTR_ERR(ebi->smc.layout);
+ if (IS_ERR(ebi->smc.layout)) {
+ ret = PTR_ERR(ebi->smc.layout);
+ goto put_node;
+ }
ebi->smc.clk = of_clk_get(smc_np, 0);
if (IS_ERR(ebi->smc.clk)) {
- if (PTR_ERR(ebi->smc.clk) != -ENOENT)
- return PTR_ERR(ebi->smc.clk);
+ if (PTR_ERR(ebi->smc.clk) != -ENOENT) {
+ ret = PTR_ERR(ebi->smc.clk);
+ goto put_node;
+ }
ebi->smc.clk = NULL;
}
+ of_node_put(smc_np);
ret = clk_prepare_enable(ebi->smc.clk);
if (ret)
return ret;
@@ -608,6 +615,10 @@ static int atmel_ebi_probe(struct platform_device *pdev)
}
return of_platform_populate(np, NULL, NULL, dev);
+
+put_node:
+ of_node_put(smc_np);
+ return ret;
}
static __maybe_unused int atmel_ebi_resume(struct device *dev)
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 2f6939da21cd..e83b61c925a4 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -287,8 +287,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
}
/* legacy dts may still use "simple-bus" compatible */
- ret = of_platform_populate(dev->dev.of_node, NULL, NULL,
- &dev->dev);
+ ret = of_platform_default_populate(dev->dev.of_node, NULL, &dev->dev);
if (ret)
goto err_free_nandirq;
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index e4cc64f56019..019a0822bde0 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -164,25 +164,39 @@ static const struct regmap_access_table rpcif_volatile_table = {
/*
- * Custom accessor functions to ensure SMRDR0 and SMWDR0 are always accessed
- * with proper width. Requires SMENR_SPIDE to be correctly set before!
+ * Custom accessor functions to ensure SM[RW]DR[01] are always accessed with
+ * proper width. Requires rpcif.xfer_size to be correctly set before!
*/
static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val)
{
struct rpcif *rpc = context;
- if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) {
- u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF);
-
- if (spide == 0x8) {
+ switch (reg) {
+ case RPCIF_SMRDR0:
+ case RPCIF_SMWDR0:
+ switch (rpc->xfer_size) {
+ case 1:
*val = readb(rpc->base + reg);
return 0;
- } else if (spide == 0xC) {
+
+ case 2:
*val = readw(rpc->base + reg);
return 0;
- } else if (spide != 0xF) {
+
+ case 4:
+ case 8:
+ *val = readl(rpc->base + reg);
+ return 0;
+
+ default:
return -EILSEQ;
}
+
+ case RPCIF_SMRDR1:
+ case RPCIF_SMWDR1:
+ if (rpc->xfer_size != 8)
+ return -EILSEQ;
+ break;
}
*val = readl(rpc->base + reg);
@@ -193,18 +207,34 @@ static int rpcif_reg_write(void *context, unsigned int reg, unsigned int val)
{
struct rpcif *rpc = context;
- if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) {
- u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF);
-
- if (spide == 0x8) {
+ switch (reg) {
+ case RPCIF_SMWDR0:
+ switch (rpc->xfer_size) {
+ case 1:
writeb(val, rpc->base + reg);
return 0;
- } else if (spide == 0xC) {
+
+ case 2:
writew(val, rpc->base + reg);
return 0;
- } else if (spide != 0xF) {
+
+ case 4:
+ case 8:
+ writel(val, rpc->base + reg);
+ return 0;
+
+ default:
return -EILSEQ;
}
+
+ case RPCIF_SMWDR1:
+ if (rpc->xfer_size != 8)
+ return -EILSEQ;
+ break;
+
+ case RPCIF_SMRDR0:
+ case RPCIF_SMRDR1:
+ return -EPERM;
}
writel(val, rpc->base + reg);
@@ -469,6 +499,7 @@ int rpcif_manual_xfer(struct rpcif *rpc)
smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes));
regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
+ rpc->xfer_size = nbytes;
memcpy(data, rpc->buffer + pos, nbytes);
if (nbytes == 8) {
@@ -533,6 +564,7 @@ int rpcif_manual_xfer(struct rpcif *rpc)
regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
regmap_write(rpc->regmap, RPCIF_SMCR,
rpc->smcr | RPCIF_SMCR_SPIE);
+ rpc->xfer_size = nbytes;
ret = wait_msg_xfer_end(rpc);
if (ret)
goto err_out;
@@ -651,6 +683,7 @@ static int rpcif_probe(struct platform_device *pdev)
struct platform_device *vdev;
struct device_node *flash;
const char *name;
+ int ret;
flash = of_get_next_child(pdev->dev.of_node, NULL);
if (!flash) {
@@ -674,7 +707,14 @@ static int rpcif_probe(struct platform_device *pdev)
return -ENOMEM;
vdev->dev.parent = &pdev->dev;
platform_set_drvdata(pdev, vdev);
- return platform_device_add(vdev);
+
+ ret = platform_device_add(vdev);
+ if (ret) {
+ platform_device_put(vdev);
+ return ret;
+ }
+
+ return 0;
}
static int rpcif_remove(struct platform_device *pdev)
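
The rewritten accessors pick the MMIO access width from the cached rpc->xfer_size instead of re-decoding the SPIDE field on every access. A standalone sketch of that width dispatch, with plain memory standing in for MMIO and memcpy() standing in for readw()/readl():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int read_reg(const void *base, size_t xfer_size, uint32_t *val)
{
        switch (xfer_size) {
        case 1:
                *val = *(const uint8_t *)base;
                return 0;
        case 2: {
                uint16_t v;

                memcpy(&v, base, sizeof(v));    /* readw() stand-in */
                *val = v;
                return 0;
        }
        case 4:
        case 8:
                memcpy(val, base, sizeof(*val));        /* readl() stand-in */
                return 0;
        default:
                return -1;      /* -EILSEQ in the driver */
        }
}

int main(void)
{
        uint8_t data[4] = { 0x11, 0x22, 0x33, 0x44 };
        size_t sizes[] = { 1, 2, 4, 3 };
        size_t i;

        for (i = 0; i < 4; i++) {
                uint32_t val = 0;
                int ret = read_reg(data, sizes[i], &val);

                printf("xfer_size %zu -> ret %d, val 0x%08x\n",
                       sizes[i], ret, val);
        }
        return 0;
}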
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 91f96abbb3f9..8d169a35cf13 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -31,6 +31,8 @@
*/
#define FM25_SN_LEN 8 /* serial number length */
+#define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */
+
struct at25_data {
struct spi_eeprom chip;
struct spi_device *spi;
@@ -39,6 +41,7 @@ struct at25_data {
struct nvmem_config nvmem_config;
struct nvmem_device *nvmem;
u8 sernum[FM25_SN_LEN];
+ u8 command[EE_MAXADDRLEN + 1];
};
#define AT25_WREN 0x06 /* latch the write enable */
@@ -61,8 +64,6 @@ struct at25_data {
#define FM25_ID_LEN 9 /* ID length */
-#define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */
-
/*
* Specs often allow 5ms for a page write, sometimes 20ms;
* it's important to recover from write timeouts.
@@ -78,7 +79,6 @@ static int at25_ee_read(void *priv, unsigned int offset,
{
struct at25_data *at25 = priv;
char *buf = val;
- u8 command[EE_MAXADDRLEN + 1];
u8 *cp;
ssize_t status;
struct spi_transfer t[2];
@@ -92,12 +92,15 @@ static int at25_ee_read(void *priv, unsigned int offset,
if (unlikely(!count))
return -EINVAL;
- cp = command;
+ cp = at25->command;
instr = AT25_READ;
if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR)
if (offset >= BIT(at25->addrlen * 8))
instr |= AT25_INSTR_BIT3;
+
+ mutex_lock(&at25->lock);
+
*cp++ = instr;
/* 8/16/24-bit address is written MSB first */
@@ -116,7 +119,7 @@ static int at25_ee_read(void *priv, unsigned int offset,
spi_message_init(&m);
memset(t, 0, sizeof(t));
- t[0].tx_buf = command;
+ t[0].tx_buf = at25->command;
t[0].len = at25->addrlen + 1;
spi_message_add_tail(&t[0], &m);
@@ -124,8 +127,6 @@ static int at25_ee_read(void *priv, unsigned int offset,
t[1].len = count;
spi_message_add_tail(&t[1], &m);
- mutex_lock(&at25->lock);
-
/*
* Read it all at once.
*
@@ -152,7 +153,7 @@ static int fm25_aux_read(struct at25_data *at25, u8 *buf, uint8_t command,
spi_message_init(&m);
memset(t, 0, sizeof(t));
- t[0].tx_buf = &command;
+ t[0].tx_buf = at25->command;
t[0].len = 1;
spi_message_add_tail(&t[0], &m);
@@ -162,6 +163,8 @@ static int fm25_aux_read(struct at25_data *at25, u8 *buf, uint8_t command,
mutex_lock(&at25->lock);
+ at25->command[0] = command;
+
status = spi_sync(at25->spi, &m);
dev_dbg(&at25->spi->dev, "read %d aux bytes --> %d\n", len, status);
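
Moving the command bytes from the stack into struct at25_data follows the SPI rule that buffers handed to spi_sync() may be mapped for DMA and therefore must not live on the stack; the lock is now taken before the shared buffer is filled. A minimal sketch of the shape (stubs only; the DMA-safety rationale is background knowledge, not spelled out in the patch itself):

#include <stdio.h>
#include <string.h>

#define MAXADDRLEN 3

struct eeprom {
        /* struct/heap storage: safe to map for DMA, unlike stack arrays */
        unsigned char command[MAXADDRLEN + 1];
};

static void start_read(struct eeprom *ee, unsigned int offset, int addrlen)
{
        unsigned char *cp = ee->command;
        int i;

        /* the lock would be taken here, before the shared buffer is touched */
        *cp++ = 0x03;                           /* READ opcode */
        for (i = addrlen - 1; i >= 0; i--)      /* address, MSB first */
                *cp++ = offset >> (8 * i);

        printf("cmd: %02x %02x %02x %02x\n", ee->command[0],
               ee->command[1], ee->command[2], ee->command[3]);
}

int main(void)
{
        struct eeprom ee;

        memset(&ee, 0, sizeof(ee));
        start_read(&ee, 0x012345, 3);   /* -> cmd: 03 01 23 45 */
        return 0;
}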
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 0e0bcd0da852..d21486d69df2 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -46,6 +46,20 @@ config INTEL_MEI_TXE
Supported SoCs:
Intel Bay Trail
+config INTEL_MEI_GSC
+ tristate "Intel MEI GSC embedded device"
+ depends on INTEL_MEI
+ depends on INTEL_MEI_ME
+ depends on X86 && PCI
+ depends on DRM_I915
+ help
+ Intel auxiliary driver for GSC devices embedded in Intel graphics devices.
+
+ An MEI device, here called GSC, can be embedded in
+ Intel graphics devices to support a range of chassis
+ tasks such as graphics card firmware updates and security
+ tasks.
+
source "drivers/misc/mei/hdcp/Kconfig"
source "drivers/misc/mei/pxp/Kconfig"
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index d8e5165917f2..fb740d754900 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -18,6 +18,9 @@ obj-$(CONFIG_INTEL_MEI_ME) += mei-me.o
mei-me-objs := pci-me.o
mei-me-objs += hw-me.o
+obj-$(CONFIG_INTEL_MEI_GSC) += mei-gsc.o
+mei-gsc-objs := gsc-me.o
+
obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o
mei-txe-objs := pci-txe.o
mei-txe-objs += hw-txe.o
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 67844089db21..59506ba6fc48 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -30,6 +30,12 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
#define MEI_UUID_MKHIF_FIX UUID_LE(0x55213584, 0x9a29, 0x4916, \
0xba, 0xdf, 0xf, 0xb7, 0xed, 0x68, 0x2a, 0xeb)
+#define MEI_UUID_IGSC_MKHI UUID_LE(0xE2C2AFA2, 0x3817, 0x4D19, \
+ 0x9D, 0x95, 0x06, 0xB1, 0x6B, 0x58, 0x8A, 0x5D)
+
+#define MEI_UUID_IGSC_MKHI_FIX UUID_LE(0x46E0C1FB, 0xA546, 0x414F, \
+ 0x91, 0x70, 0xB7, 0xF4, 0x6D, 0x57, 0xB4, 0xAD)
+
#define MEI_UUID_HDCP UUID_LE(0xB638AB7E, 0x94E2, 0x4EA2, \
0xA5, 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
@@ -241,6 +247,23 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
mei_cldev_disable(cldev);
}
+static void mei_gsc_mkhi_ver(struct mei_cl_device *cldev)
+{
+ int ret;
+
+ /* No need to enable the client if nothing is needed from it */
+ if (!cldev->bus->fw_f_fw_ver_supported)
+ return;
+
+ ret = mei_cldev_enable(cldev);
+ if (ret)
+ return;
+
+ ret = mei_fwver(cldev);
+ if (ret < 0)
+ dev_err(&cldev->dev, "FW version command failed %d\n", ret);
+ mei_cldev_disable(cldev);
+}
/**
* mei_wd - wd client on the bus, change protocol version
* as the API has changed.
@@ -492,6 +515,8 @@ static struct mei_fixup {
MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
MEI_FIXUP(MEI_UUID_WD, mei_wd),
MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix),
+ MEI_FIXUP(MEI_UUID_IGSC_MKHI, mei_gsc_mkhi_ver),
+ MEI_FIXUP(MEI_UUID_IGSC_MKHI_FIX, mei_gsc_mkhi_ver),
MEI_FIXUP(MEI_UUID_HDCP, whitelist),
MEI_FIXUP(MEI_UUID_ANY, vt_support),
MEI_FIXUP(MEI_UUID_PAVP, whitelist),
diff --git a/drivers/misc/mei/gsc-me.c b/drivers/misc/mei/gsc-me.c
new file mode 100644
index 000000000000..c8145e9b62b6
--- /dev/null
+++ b/drivers/misc/mei/gsc-me.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright(c) 2019-2022, Intel Corporation. All rights reserved.
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ */
+
+#include <linux/module.h>
+#include <linux/mei_aux.h>
+#include <linux/device.h>
+#include <linux/irqreturn.h>
+#include <linux/jiffies.h>
+#include <linux/ktime.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+
+#include "mei_dev.h"
+#include "hw-me.h"
+#include "hw-me-regs.h"
+
+#include "mei-trace.h"
+
+#define MEI_GSC_RPM_TIMEOUT 500
+
+static int mei_gsc_read_hfs(const struct mei_device *dev, int where, u32 *val)
+{
+ struct mei_me_hw *hw = to_me_hw(dev);
+
+ *val = ioread32(hw->mem_addr + where + 0xC00);
+
+ return 0;
+}
+
+static int mei_gsc_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *aux_dev_id)
+{
+ struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);
+ struct mei_device *dev;
+ struct mei_me_hw *hw;
+ struct device *device;
+ const struct mei_cfg *cfg;
+ int ret;
+
+ cfg = mei_me_get_cfg(aux_dev_id->driver_data);
+ if (!cfg)
+ return -ENODEV;
+
+ device = &aux_dev->dev;
+
+ dev = mei_me_dev_init(device, cfg);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ hw = to_me_hw(dev);
+ hw->mem_addr = devm_ioremap_resource(device, &adev->bar);
+ if (IS_ERR(hw->mem_addr)) {
+ dev_err(device, "mmio not mapped\n");
+ ret = PTR_ERR(hw->mem_addr);
+ goto err;
+ }
+
+ hw->irq = adev->irq;
+ hw->read_fws = mei_gsc_read_hfs;
+
+ dev_set_drvdata(device, dev);
+
+ ret = devm_request_threaded_irq(device, hw->irq,
+ mei_me_irq_quick_handler,
+ mei_me_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, dev);
+ if (ret) {
+ dev_err(device, "irq register failed %d\n", ret);
+ goto err;
+ }
+
+ pm_runtime_get_noresume(device);
+ pm_runtime_set_active(device);
+ pm_runtime_enable(device);
+
+ /* Continue to char device setup in spite of firmware handshake failure,
+ * in order to provide access to the firmware status registers to user
+ * space via sysfs.
+ */
+ if (mei_start(dev))
+ dev_warn(device, "init hw failure.\n");
+
+ pm_runtime_set_autosuspend_delay(device, MEI_GSC_RPM_TIMEOUT);
+ pm_runtime_use_autosuspend(device);
+
+ ret = mei_register(dev, device);
+ if (ret)
+ goto register_err;
+
+ pm_runtime_put_noidle(device);
+ return 0;
+
+register_err:
+ mei_stop(dev);
+ devm_free_irq(device, hw->irq, dev);
+
+err:
+ dev_err(device, "probe failed: %d\n", ret);
+ dev_set_drvdata(device, NULL);
+ return ret;
+}
+
+static void mei_gsc_remove(struct auxiliary_device *aux_dev)
+{
+ struct mei_device *dev;
+ struct mei_me_hw *hw;
+
+ dev = dev_get_drvdata(&aux_dev->dev);
+ if (!dev)
+ return;
+
+ hw = to_me_hw(dev);
+
+ mei_stop(dev);
+
+ mei_deregister(dev);
+
+ pm_runtime_disable(&aux_dev->dev);
+
+ mei_disable_interrupts(dev);
+ devm_free_irq(&aux_dev->dev, hw->irq, dev);
+}
+
+static int __maybe_unused mei_gsc_pm_suspend(struct device *device)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+
+ if (!dev)
+ return -ENODEV;
+
+ mei_stop(dev);
+
+ mei_disable_interrupts(dev);
+
+ return 0;
+}
+
+static int __maybe_unused mei_gsc_pm_resume(struct device *device)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+ int err;
+
+ if (!dev)
+ return -ENODEV;
+
+ err = mei_restart(dev);
+ if (err)
+ return err;
+
+ /* Start timer if stopped in suspend */
+ schedule_delayed_work(&dev->timer_work, HZ);
+
+ return 0;
+}
+
+static int __maybe_unused mei_gsc_pm_runtime_idle(struct device *device)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+
+ if (!dev)
+ return -ENODEV;
+ if (mei_write_is_idle(dev))
+ pm_runtime_autosuspend(device);
+
+ return -EBUSY;
+}
+
+static int __maybe_unused mei_gsc_pm_runtime_suspend(struct device *device)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+ struct mei_me_hw *hw;
+ int ret;
+
+ if (!dev)
+ return -ENODEV;
+
+ mutex_lock(&dev->device_lock);
+
+ if (mei_write_is_idle(dev)) {
+ hw = to_me_hw(dev);
+ hw->pg_state = MEI_PG_ON;
+ ret = 0;
+ } else {
+ ret = -EAGAIN;
+ }
+
+ mutex_unlock(&dev->device_lock);
+
+ return ret;
+}
+
+static int __maybe_unused mei_gsc_pm_runtime_resume(struct device *device)
+{
+ struct mei_device *dev = dev_get_drvdata(device);
+ struct mei_me_hw *hw;
+ irqreturn_t irq_ret;
+
+ if (!dev)
+ return -ENODEV;
+
+ mutex_lock(&dev->device_lock);
+
+ hw = to_me_hw(dev);
+ hw->pg_state = MEI_PG_OFF;
+
+ mutex_unlock(&dev->device_lock);
+
+ irq_ret = mei_me_irq_thread_handler(1, dev);
+ if (irq_ret != IRQ_HANDLED)
+ dev_err(dev->dev, "thread handler fail %d\n", irq_ret);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mei_gsc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mei_gsc_pm_suspend,
+ mei_gsc_pm_resume)
+ SET_RUNTIME_PM_OPS(mei_gsc_pm_runtime_suspend,
+ mei_gsc_pm_runtime_resume,
+ mei_gsc_pm_runtime_idle)
+};
+
+static const struct auxiliary_device_id mei_gsc_id_table[] = {
+ {
+ .name = "i915.mei-gsc",
+ .driver_data = MEI_ME_GSC_CFG,
+
+ },
+ {
+ .name = "i915.mei-gscfi",
+ .driver_data = MEI_ME_GSCFI_CFG,
+ },
+ {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(auxiliary, mei_gsc_id_table);
+
+static struct auxiliary_driver mei_gsc_driver = {
+ .probe = mei_gsc_probe,
+ .remove = mei_gsc_remove,
+ .driver = {
+ /* auxiliary_driver_register() sets .name to be the modname */
+ .pm = &mei_gsc_pm_ops,
+ },
+ .id_table = mei_gsc_id_table
+};
+module_auxiliary_driver(mei_gsc_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_ALIAS("auxiliary:i915.mei-gsc");
+MODULE_ALIAS("auxiliary:i915.mei-gscfi");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 719fee9af156..9870bf717979 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1226,6 +1226,7 @@ irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
me_intr_disable(dev, hcsr);
return IRQ_WAKE_THREAD;
}
+EXPORT_SYMBOL_GPL(mei_me_irq_quick_handler);
/**
* mei_me_irq_thread_handler - function called after ISR to handle the interrupt
@@ -1326,6 +1327,7 @@ end:
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler);
static const struct mei_hw_ops mei_me_hw_ops = {
@@ -1440,6 +1442,12 @@ static bool mei_me_fw_type_sps_ign(const struct pci_dev *pdev)
#define MEI_CFG_KIND_ITOUCH \
.kind = "itouch"
+#define MEI_CFG_TYPE_GSC \
+ .kind = "gsc"
+
+#define MEI_CFG_TYPE_GSCFI \
+ .kind = "gscfi"
+
#define MEI_CFG_FW_SPS_IGN \
.quirk_probe = mei_me_fw_type_sps_ign
@@ -1572,6 +1580,20 @@ static const struct mei_cfg mei_me_pch15_sps_cfg = {
MEI_CFG_FW_SPS_IGN,
};
+/* Graphics System Controller */
+static const struct mei_cfg mei_me_gsc_cfg = {
+ MEI_CFG_TYPE_GSC,
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+};
+
+/* Graphics System Controller Firmware Interface */
+static const struct mei_cfg mei_me_gscfi_cfg = {
+ MEI_CFG_TYPE_GSCFI,
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+};
+
/*
 * mei_cfg_list - A list of platform specific configurations.
* Note: has to be synchronized with enum mei_cfg_idx.
@@ -1592,6 +1614,8 @@ static const struct mei_cfg *const mei_cfg_list[] = {
[MEI_ME_PCH12_SPS_ITOUCH_CFG] = &mei_me_pch12_itouch_sps_cfg,
[MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
[MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
+ [MEI_ME_GSC_CFG] = &mei_me_gsc_cfg,
+ [MEI_ME_GSCFI_CFG] = &mei_me_gscfi_cfg,
};
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
@@ -1602,7 +1626,8 @@ const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
return NULL;
return mei_cfg_list[idx];
-};
+}
+EXPORT_SYMBOL_GPL(mei_me_get_cfg);
/**
* mei_me_dev_init - allocates and initializes the mei device structure
@@ -1637,4 +1662,4 @@ struct mei_device *mei_me_dev_init(struct device *parent,
return dev;
}
-
+EXPORT_SYMBOL_GPL(mei_me_dev_init);
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 00a7132ac7a2..a071c645e905 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -112,6 +112,8 @@ enum mei_cfg_idx {
MEI_ME_PCH12_SPS_ITOUCH_CFG,
MEI_ME_PCH15_CFG,
MEI_ME_PCH15_SPS_CFG,
+ MEI_ME_GSC_CFG,
+ MEI_ME_GSCFI_CFG,
MEI_ME_NUM_CFG,
};
diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c
index e7df3dac705e..49ab3448b9b1 100644
--- a/drivers/mtd/nand/raw/mtk_ecc.c
+++ b/drivers/mtd/nand/raw/mtk_ecc.c
@@ -43,6 +43,7 @@
struct mtk_ecc_caps {
u32 err_mask;
+ u32 err_shift;
const u8 *ecc_strength;
const u32 *ecc_regs;
u8 num_ecc_strength;
@@ -76,7 +77,7 @@ static const u8 ecc_strength_mt2712[] = {
};
static const u8 ecc_strength_mt7622[] = {
- 4, 6, 8, 10, 12, 14, 16
+ 4, 6, 8, 10, 12
};
enum mtk_ecc_regs {
@@ -221,7 +222,7 @@ void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
for (i = 0; i < sectors; i++) {
offset = (i >> 2) << 2;
err = readl(ecc->regs + ECC_DECENUM0 + offset);
- err = err >> ((i % 4) * 8);
+ err = err >> ((i % 4) * ecc->caps->err_shift);
err &= ecc->caps->err_mask;
if (err == ecc->caps->err_mask) {
/* uncorrectable errors */
@@ -449,6 +450,7 @@ EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
.err_mask = 0x3f,
+ .err_shift = 8,
.ecc_strength = ecc_strength_mt2701,
.ecc_regs = mt2701_ecc_regs,
.num_ecc_strength = 20,
@@ -459,6 +461,7 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
.err_mask = 0x7f,
+ .err_shift = 8,
.ecc_strength = ecc_strength_mt2712,
.ecc_regs = mt2712_ecc_regs,
.num_ecc_strength = 23,
@@ -468,10 +471,11 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
};
static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
- .err_mask = 0x3f,
+ .err_mask = 0x1f,
+ .err_shift = 5,
.ecc_strength = ecc_strength_mt7622,
.ecc_regs = mt7622_ecc_regs,
- .num_ecc_strength = 7,
+ .num_ecc_strength = 5,
.ecc_mode_shift = 4,
.parity_bits = 13,
.pg_irq_sel = 0,
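The new err_shift field captures that the MT7622 decode registers still pack four per-sector error counters per 32-bit word, but in 5-bit fields rather than the 8-bit fields of MT2701/MT2712. A standalone restatement of the decode step, with illustrative types:

#include <stdint.h>

/* Restatement of the decode in mtk_ecc_get_stats(): four per-sector
 * error counters are packed per 32-bit word, with a chip-specific
 * field width (8 bits on MT2701/MT2712, 5 bits on MT7622).
 */
static uint32_t sector_err_count(uint32_t reg, unsigned int sector,
				 unsigned int err_shift, uint32_t err_mask)
{
	return (reg >> ((sector % 4) * err_shift)) & err_mask;
}

/* With the old hard-coded shift of 8, MT7622 read garbage: sector 1's
 * count lives at bits 9:5, not bits 15:8. */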
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 1a77542c6d67..048b255faa76 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2651,10 +2651,23 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
+ /* Free the initially allocated BAM transaction for reading the ONFI params */
+ if (nandc->props->is_bam)
+ free_bam_transaction(nandc);
nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
cwperpage);
+ /* Now allocate the BAM transaction based on updated max_cwperpage */
+ if (nandc->props->is_bam) {
+ nandc->bam_txn = alloc_bam_transaction(nandc);
+ if (!nandc->bam_txn) {
+ dev_err(nandc->dev,
+ "failed to allocate bam transaction\n");
+ return -ENOMEM;
+ }
+ }
+
/*
* DATA_UD_BYTES varies based on whether the read/write command protects
* spare data with ECC too. We protect spare data by default, so we set
@@ -2955,17 +2968,6 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
if (ret)
return ret;
- if (nandc->props->is_bam) {
- free_bam_transaction(nandc);
- nandc->bam_txn = alloc_bam_transaction(nandc);
- if (!nandc->bam_txn) {
- dev_err(nandc->dev,
- "failed to allocate bam transaction\n");
- nand_cleanup(chip);
- return -ENOMEM;
- }
- }
-
ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
if (ret)
nand_cleanup(chip);
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index b85b9c6fcc42..a278829469d6 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -384,7 +384,8 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
dma_addr_t dma_addr;
dma_cookie_t cookie;
uint32_t reg;
- int ret;
+ int ret = 0;
+ unsigned long time_left;
if (dir == DMA_FROM_DEVICE) {
chan = flctl->chan_fifo0_rx;
@@ -425,13 +426,14 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
goto out;
}
- ret =
+ time_left =
wait_for_completion_timeout(&flctl->dma_complete,
msecs_to_jiffies(3000));
- if (ret <= 0) {
+ if (time_left == 0) {
dmaengine_terminate_all(chan);
dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
+ ret = -ETIMEDOUT;
}
out:
@@ -441,7 +443,7 @@ out:
dma_unmap_single(chan->device->dev, dma_addr, len, dir);
- /* ret > 0 is success */
+ /* ret == 0 is success */
return ret;
}
@@ -465,7 +467,7 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
/* initiate DMA transfer */
if (flctl->chan_fifo0_rx && rlen >= 32 &&
- flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0)
+ !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE))
goto convert; /* DMA success */
/* do polling transfer */
@@ -524,7 +526,7 @@ static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
/* initiate DMA transfer */
if (flctl->chan_fifo0_tx && rlen >= 32 &&
- flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0)
+ !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE))
return; /* DMA success */
/* do polling transfer */
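The underlying issue: wait_for_completion_timeout() returns the remaining jiffies as an unsigned long (0 on timeout), never a negative errno, so storing it in a signed ret and testing "ret <= 0" conflated timeout detection with success. A minimal sketch of the corrected pattern, with a hypothetical helper name:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int sketch_wait_dma(struct completion *done)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(done,
						msecs_to_jiffies(3000));
	return time_left ? 0 : -ETIMEDOUT;	/* 0 means the wait timed out */
}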
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 15eddca7b4b6..38e152548126 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4027,14 +4027,19 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const v
return true;
}
-static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
+static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy)
{
hash ^= (__force u32)flow_get_u32_dst(flow) ^
(__force u32)flow_get_u32_src(flow);
hash ^= (hash >> 16);
hash ^= (hash >> 8);
+
/* discard lowest hash bit to deal with the common even ports pattern */
- return hash >> 1;
+ if (xmit_policy == BOND_XMIT_POLICY_LAYER34 ||
+ xmit_policy == BOND_XMIT_POLICY_ENCAP34)
+ return hash >> 1;
+
+ return hash;
}
/* Generate hash based on xmit policy. If @skb is given it is used to linearize
@@ -4064,7 +4069,7 @@ static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const voi
memcpy(&hash, &flow.ports.ports, sizeof(hash));
}
- return bond_ip_hash(hash, &flow);
+ return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
}
/**
@@ -5259,7 +5264,7 @@ static u32 bond_sk_hash_l34(struct sock *sk)
/* L4 */
memcpy(&hash, &flow.ports.ports, sizeof(hash));
/* L3 */
- return bond_ip_hash(hash, &flow);
+ return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
}
static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
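To make the policy-dependent final fold explicit, here is a standalone restatement of bond_ip_hash() with kernel types swapped for stdint (illustrative only): only the L3+L4 policies mix ports into the hash, where the common even/odd port pattern biases the lowest bit, so only they discard it.

#include <stdint.h>
#include <stdbool.h>

static uint32_t fold_bond_hash(uint32_t hash, uint32_t src, uint32_t dst,
			       bool l34_policy)
{
	hash ^= dst ^ src;
	hash ^= hash >> 16;
	hash ^= hash >> 8;

	/* port-derived hashes carry the even-ports bias in bit 0 */
	return l34_policy ? hash >> 1 : hash;
}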
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index a416240d001b..12c15da55664 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1681,9 +1681,6 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
break;
case PHY_INTERFACE_MODE_RMII:
miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
-
- /* Configure the RMII clock as output: */
- miicfg |= GSWIP_MII_CFG_RMII_CLK;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
diff --git a/drivers/net/dsa/mv88e6xxx/port_hidden.c b/drivers/net/dsa/mv88e6xxx/port_hidden.c
index b49d05f0e117..7a9f9ff6dedf 100644
--- a/drivers/net/dsa/mv88e6xxx/port_hidden.c
+++ b/drivers/net/dsa/mv88e6xxx/port_hidden.c
@@ -40,8 +40,9 @@ int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_PORT_RESERVED_1A_BUSY);
- return mv88e6xxx_wait_bit(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
- MV88E6XXX_PORT_RESERVED_1A, bit, 0);
+ return mv88e6xxx_port_wait_bit(chip,
+ MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
+ MV88E6XXX_PORT_RESERVED_1A, bit, 0);
}
int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 413b0006e9a2..9e28219b223d 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -670,6 +670,8 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
enum dsa_tag_protocol old_proto = felix->tag_proto;
+ bool cpu_port_active = false;
+ struct dsa_port *dp;
int err;
if (proto != DSA_TAG_PROTO_SEVILLE &&
@@ -677,6 +679,27 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
proto != DSA_TAG_PROTO_OCELOT_8021Q)
return -EPROTONOSUPPORT;
+ /* We don't support multiple CPU ports, yet the DT blob may have
+ * multiple CPU ports defined. The first CPU port is the active one,
+ * the others are inactive. In this case, DSA will call
+ * ->change_tag_protocol() multiple times, once per CPU port.
+ * Since we implement the tagging protocol change towards "ocelot" or
+ * "seville" as initializing the NPI port, we would otherwise end up
+ * reassigning the NPI port to the last @cpu argument passed, which is
+ * an unused DSA CPU port and not the one that should actively pass
+ * traffic.
+ * Suppress DSA's calls on CPU ports that are inactive.
+ */
+ dsa_switch_for_each_user_port(dp, ds) {
+ if (dp->cpu_dp->index == cpu) {
+ cpu_port_active = true;
+ break;
+ }
+ }
+
+ if (!cpu_port_active)
+ return 0;
+
felix_del_tag_protocol(ds, cpu, old_proto);
err = felix_set_tag_protocol(ds, cpu, proto);
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 8d382b27e625..52a8566071ed 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -2316,7 +2316,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
err = dsa_register_switch(ds);
if (err) {
- dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
+ dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
goto err_register_ds;
}
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
index 1aa79735355f..060165a85fb7 100644
--- a/drivers/net/dsa/realtek/Kconfig
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -9,34 +9,46 @@ menuconfig NET_DSA_REALTEK
help
Select to enable support for Realtek Ethernet switch chips.
+ Note that at least one interface driver must be enabled for the
+ subdrivers to be loaded. Moreover, an interface driver cannot achieve
+ anything without at least one subdriver enabled.
+
+if NET_DSA_REALTEK
+
config NET_DSA_REALTEK_MDIO
- tristate "Realtek MDIO connected switch driver"
- depends on NET_DSA_REALTEK
+ tristate "Realtek MDIO interface driver"
depends on OF
+ depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
+ depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+ depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for registering switches configured
through MDIO.
config NET_DSA_REALTEK_SMI
- tristate "Realtek SMI connected switch driver"
- depends on NET_DSA_REALTEK
+ tristate "Realtek SMI interface driver"
depends on OF
+ depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
+ depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+ depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for registering switches connected
through SMI.
config NET_DSA_REALTEK_RTL8365MB
tristate "Realtek RTL8365MB switch subdriver"
- depends on NET_DSA_REALTEK
- depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+ imply NET_DSA_REALTEK_SMI
+ imply NET_DSA_REALTEK_MDIO
select NET_DSA_TAG_RTL8_4
help
Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
config NET_DSA_REALTEK_RTL8366RB
tristate "Realtek RTL8366RB switch subdriver"
- depends on NET_DSA_REALTEK
- depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+ imply NET_DSA_REALTEK_SMI
+ imply NET_DSA_REALTEK_MDIO
select NET_DSA_TAG_RTL4_A
help
- Select to enable support for Realtek RTL8366RB
+ Select to enable support for Realtek RTL8366RB.
+
+endif
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
index 31e1f100e48e..c58f49d558d2 100644
--- a/drivers/net/dsa/realtek/realtek-mdio.c
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -267,7 +267,6 @@ static const struct of_device_id realtek_mdio_of_match[] = {
#endif
#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
{ .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
- { .compatible = "realtek,rtl8367s", .data = &rtl8365mb_variant, },
#endif
{ /* sentinel */ },
};
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
index 2243d3da55b2..45992f79ec8d 100644
--- a/drivers/net/dsa/realtek/realtek-smi.c
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -546,20 +546,11 @@ static const struct of_device_id realtek_smi_of_match[] = {
.data = &rtl8366rb_variant,
},
#endif
- {
- /* FIXME: add support for RTL8366S and more */
- .compatible = "realtek,rtl8366s",
- .data = NULL,
- },
#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
{
.compatible = "realtek,rtl8365mb",
.data = &rtl8365mb_variant,
},
- {
- .compatible = "realtek,rtl8367s",
- .data = &rtl8365mb_variant,
- },
#endif
{ /* sentinel */ },
};
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index bd4cb9d7c35d..827993022386 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -35,15 +35,6 @@ source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/asix/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
-source "drivers/net/ethernet/broadcom/Kconfig"
-source "drivers/net/ethernet/brocade/Kconfig"
-source "drivers/net/ethernet/cadence/Kconfig"
-source "drivers/net/ethernet/calxeda/Kconfig"
-source "drivers/net/ethernet/cavium/Kconfig"
-source "drivers/net/ethernet/chelsio/Kconfig"
-source "drivers/net/ethernet/cirrus/Kconfig"
-source "drivers/net/ethernet/cisco/Kconfig"
-source "drivers/net/ethernet/cortina/Kconfig"
config CX_ECAT
tristate "Beckhoff CX5020 EtherCAT master support"
@@ -57,6 +48,14 @@ config CX_ECAT
To compile this driver as a module, choose M here. The module
will be called ec_bhf.
+source "drivers/net/ethernet/broadcom/Kconfig"
+source "drivers/net/ethernet/cadence/Kconfig"
+source "drivers/net/ethernet/calxeda/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
+source "drivers/net/ethernet/chelsio/Kconfig"
+source "drivers/net/ethernet/cirrus/Kconfig"
+source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/cortina/Kconfig"
source "drivers/net/ethernet/davicom/Kconfig"
config DNET
@@ -85,7 +84,6 @@ source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
-source "drivers/net/ethernet/microsoft/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
@@ -128,8 +126,9 @@ source "drivers/net/ethernet/mediatek/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
-source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/mscc/Kconfig"
+source "drivers/net/ethernet/microsoft/Kconfig"
+source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
@@ -141,10 +140,10 @@ config FEALNX
Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
cards. <http://www.myson.com.tw/>
+source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/natsemi/Kconfig"
source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/netronome/Kconfig"
-source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/8390/Kconfig"
source "drivers/net/ethernet/nvidia/Kconfig"
source "drivers/net/ethernet/nxp/Kconfig"
@@ -164,6 +163,7 @@ source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
source "drivers/net/ethernet/pensando/Kconfig"
source "drivers/net/ethernet/qlogic/Kconfig"
+source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/qualcomm/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
source "drivers/net/ethernet/realtek/Kconfig"
@@ -171,10 +171,10 @@ source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rocker/Kconfig"
source "drivers/net/ethernet/samsung/Kconfig"
source "drivers/net/ethernet/seeq/Kconfig"
-source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"
source "drivers/net/ethernet/sis/Kconfig"
+source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/socionext/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 33f1a1377588..24d715c28a35 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -486,8 +486,8 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
err = aq_vec_start(aq_vec);
if (err < 0)
goto err_exit;
@@ -517,8 +517,8 @@ int aq_nic_start(struct aq_nic_s *self)
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
} else {
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
aq_vec_isr, aq_vec,
aq_vec_get_affinity_mask(aq_vec));
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 797a95142d1f..3a529ee8c834 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -444,22 +444,22 @@ err_exit:
static int aq_pm_freeze(struct device *dev)
{
- return aq_suspend_common(dev, false);
+ return aq_suspend_common(dev, true);
}
static int aq_pm_suspend_poweroff(struct device *dev)
{
- return aq_suspend_common(dev, true);
+ return aq_suspend_common(dev, false);
}
static int aq_pm_thaw(struct device *dev)
{
- return atl_resume_common(dev, false);
+ return atl_resume_common(dev, true);
}
static int aq_pm_resume_restore(struct device *dev)
{
- return atl_resume_common(dev, true);
+ return atl_resume_common(dev, false);
}
static const struct dev_pm_ops aq_pm_ops = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index f4774cf051c9..6ab1f3212d24 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -43,8 +43,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
if (!self) {
err = -EINVAL;
} else {
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
ring[AQ_VEC_RX_ID].stats.rx.polls++;
u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
@@ -182,8 +182,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
self->aq_hw_ops = aq_hw_ops;
self->aq_hw = aq_hw;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
if (err < 0)
goto err_exit;
@@ -224,8 +224,8 @@ int aq_vec_start(struct aq_vec_s *self)
unsigned int i = 0U;
int err = 0;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
&ring[AQ_VEC_TX_ID]);
if (err < 0)
@@ -248,8 +248,8 @@ void aq_vec_stop(struct aq_vec_s *self)
struct aq_ring_s *ring = NULL;
unsigned int i = 0U;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
&ring[AQ_VEC_TX_ID]);
@@ -268,8 +268,8 @@ void aq_vec_deinit(struct aq_vec_s *self)
if (!self)
goto err_exit;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
}
@@ -297,8 +297,8 @@ void aq_vec_ring_free(struct aq_vec_s *self)
if (!self)
goto err_exit;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
aq_ring_free(&ring[AQ_VEC_TX_ID]);
if (i < self->rx_rings)
aq_ring_free(&ring[AQ_VEC_RX_ID]);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c19b072f3a23..962253db25b8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -14153,10 +14153,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
/* Stop Tx */
bnx2x_tx_disable(bp);
- /* Delete all NAPI objects */
- bnx2x_del_all_napi(bp);
- if (CNIC_LOADED(bp))
- bnx2x_del_all_napi_cnic(bp);
netdev_reset_tc(bp->dev);
del_timer_sync(&bp->timer);
@@ -14261,6 +14257,11 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
bnx2x_drain_tx_queues(bp);
bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
bnx2x_netif_stop(bp, 1);
+ bnx2x_del_all_napi(bp);
+
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
+
bnx2x_free_irq(bp);
/* Report UNLOAD_DONE to MCP */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 2dd79af9411b..bf1ec8fdc2ad 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset)
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(value, offset);
else
- writel(value, offset);
+ writel_relaxed(value, offset);
}
static inline u32 bcmgenet_readl(void __iomem *offset)
@@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset)
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(offset);
else
- return readl(offset);
+ return readl_relaxed(offset);
}
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
@@ -2035,6 +2035,11 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
return skb;
}
+static void bcmgenet_hide_tsb(struct sk_buff *skb)
+{
+ __skb_pull(skb, sizeof(struct status_64));
+}
+
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -2141,6 +2146,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
}
GENET_CB(skb)->last_cb = tx_cb_ptr;
+
+ bcmgenet_hide_tsb(skb);
skb_tx_timestamp(skb);
/* Decrement total BD count and advance our write pointer */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 800d5ced5800..e475be29845c 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1658,6 +1658,7 @@ static void macb_tx_restart(struct macb_queue *queue)
unsigned int head = queue->tx_head;
unsigned int tail = queue->tx_tail;
struct macb *bp = queue->bp;
+ unsigned int head_idx, tbqp;
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(TXUBR));
@@ -1665,6 +1666,13 @@ static void macb_tx_restart(struct macb_queue *queue)
if (head == tail)
return;
+ tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
+ tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
+ head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
+
+ if (tbqp == head_idx)
+ return;
+
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
}
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index d5356db7539a..caf48023f8ea 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1835,11 +1835,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
priv->is_aspeed = true;
- /* Disable ast2600 problematic HW arbitration */
- if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
- iowrite32(FTGMAC100_TM_DEFAULT,
- priv->base + FTGMAC100_OFFSET_TM);
- }
} else {
priv->rxdes0_edorr_mask = BIT(15);
priv->txdes0_edotr_mask = BIT(15);
@@ -1911,6 +1906,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = ftgmac100_setup_clk(priv);
if (err)
goto err_phy_connect;
+
+ /* Disable ast2600 problematic HW arbitration */
+ if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
+ iowrite32(FTGMAC100_TM_DEFAULT,
+ priv->base + FTGMAC100_OFFSET_TM);
}
/* Default ring sizes */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 763d2c7b5fb1..5750f9a56393 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -489,11 +489,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
info->phc_index = -1;
fman_node = of_get_parent(mac_node);
- if (fman_node)
+ if (fman_node) {
ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+ of_node_put(fman_node);
+ }
- if (ptp_node)
+ if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
+ of_node_put(ptp_node);
+ }
if (ptp_dev)
ptp = platform_get_drvdata(ptp_dev);
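Both of_get_parent() and of_parse_phandle() return a node with its refcount elevated, and of_find_device_by_node() takes a device reference of its own; the fix drops each node reference as soon as it is no longer needed. A hypothetical condensed form of the lookup:

#include <linux/of.h>
#include <linux/of_platform.h>

static struct platform_device *sketch_find_ptp(struct device_node *mac_node)
{
	struct device_node *fman_node, *ptp_node = NULL;
	struct platform_device *ptp_dev = NULL;

	fman_node = of_get_parent(mac_node);		/* +1 on fman_node */
	if (fman_node) {
		ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
		of_node_put(fman_node);			/* done with it */
	}

	if (ptp_node) {
		ptp_dev = of_find_device_by_node(ptp_node); /* +1 on device */
		of_node_put(ptp_node);
	}

	return ptp_dev;
}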
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 79afb1d7289b..9182631856d5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -297,10 +297,6 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
if (tc < 0 || tc >= priv->num_tx_rings)
return -EINVAL;
- /* Do not support TXSTART and TX CSUM offload simutaniously */
- if (ndev->features & NETIF_F_CSUM_MASK)
- return -EBUSY;
-
/* TSD and Qbv are mutually exclusive in hardware */
if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
return -EBUSY;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 11227f51404c..9f33ec838b52 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3731,7 +3731,7 @@ static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
ARRAY_SIZE(out_val));
if (ret) {
dev_dbg(&fep->pdev->dev, "no stop mode property\n");
- return ret;
+ goto out;
}
fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 7edf8569514c..928d934cb21a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -1065,19 +1065,23 @@ int hns_mac_init(struct dsaf_device *dsaf_dev)
device_for_each_child_node(dsaf_dev->dev, child) {
ret = fwnode_property_read_u32(child, "reg", &port_id);
if (ret) {
+ fwnode_handle_put(child);
dev_err(dsaf_dev->dev,
"get reg fail, ret=%d!\n", ret);
return ret;
}
if (port_id >= max_port_num) {
+ fwnode_handle_put(child);
dev_err(dsaf_dev->dev,
"reg(%u) out of range!\n", port_id);
return -EINVAL;
}
mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
GFP_KERNEL);
- if (!mac_cb)
+ if (!mac_cb) {
+ fwnode_handle_put(child);
return -ENOMEM;
+ }
mac_cb->fw_port = child;
mac_cb->mac_id = (u8)port_id;
dsaf_dev->mac_cb[port_id] = mac_cb;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
index 0c60f41fca8a..f3c9395d8351 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -75,7 +75,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
- "failed to get tqp stat, ret = %d, tx = %u.\n",
+ "failed to get tqp stat, ret = %d, rx = %u.\n",
ret, i);
return ret;
}
@@ -89,7 +89,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
- "failed to get tqp stat, ret = %d, rx = %u.\n",
+ "failed to get tqp stat, ret = %d, tx = %u.\n",
ret, i);
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 44d9b560b337..93aeb615191d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -562,12 +562,12 @@ static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf,
for (i = 0; i < ring_num; i++) {
j = 0;
- sprintf(result[j++], "%8u", i);
- sprintf(result[j++], "%9u", ring->tx_copybreak);
- sprintf(result[j++], "%3u", tx_spare->len);
- sprintf(result[j++], "%3u", tx_spare->next_to_use);
- sprintf(result[j++], "%3u", tx_spare->next_to_clean);
- sprintf(result[j++], "%3u", tx_spare->last_to_clean);
+ sprintf(result[j++], "%u", i);
+ sprintf(result[j++], "%u", ring->tx_copybreak);
+ sprintf(result[j++], "%u", tx_spare->len);
+ sprintf(result[j++], "%u", tx_spare->next_to_use);
+ sprintf(result[j++], "%u", tx_spare->next_to_clean);
+ sprintf(result[j++], "%u", tx_spare->last_to_clean);
sprintf(result[j++], "%pad", &tx_spare->dma);
hns3_dbg_fill_content(content, sizeof(content),
tx_spare_info_items,
@@ -598,35 +598,35 @@ static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
u32 base_add_l, base_add_h;
u32 j = 0;
- sprintf(result[j++], "%8u", index);
+ sprintf(result[j++], "%u", index);
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_NUM_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_LEN_REG));
- sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_TAIL_REG));
- sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_HEAD_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_FBDNUM_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%9u", ring->rx_copybreak);
+ sprintf(result[j++], "%u", ring->rx_copybreak);
- sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_EN_REG) ? "on" : "off");
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_EN_REG) ? "on" : "off");
else
- sprintf(result[j++], "%10s", "NA");
+ sprintf(result[j++], "%s", "NA");
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_H_REG);
@@ -700,36 +700,36 @@ static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
u32 base_add_l, base_add_h;
u32 j = 0;
- sprintf(result[j++], "%8u", index);
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", index);
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BD_NUM_REG));
- sprintf(result[j++], "%2u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TC_REG));
- sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG));
- sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_FBDNUM_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_OFFSET_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_EN_REG) ? "on" : "off");
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_EN_REG) ? "on" : "off");
else
- sprintf(result[j++], "%10s", "NA");
+ sprintf(result[j++], "%s", "NA");
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_H_REG);
@@ -848,15 +848,15 @@ static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv,
{
unsigned int j = 0;
- sprintf(result[j++], "%5d", idx);
+ sprintf(result[j++], "%d", idx);
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info));
- sprintf(result[j++], "%7u", le16_to_cpu(desc->rx.pkt_len));
- sprintf(result[j++], "%4u", le16_to_cpu(desc->rx.size));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size));
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->rx.fd_id));
- sprintf(result[j++], "%8u", le16_to_cpu(desc->rx.vlan_tag));
- sprintf(result[j++], "%15u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
- sprintf(result[j++], "%11u", le16_to_cpu(desc->rx.ot_vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag));
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info));
if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
u32 ol_info = le32_to_cpu(desc->rx.ol_info);
@@ -930,19 +930,19 @@ static void hns3_dump_tx_bd_info(struct hns3_nic_priv *priv,
{
unsigned int j = 0;
- sprintf(result[j++], "%6d", idx);
+ sprintf(result[j++], "%d", idx);
sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.vlan_tag));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.send_size));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size));
sprintf(result[j++], "%#x",
le32_to_cpu(desc->tx.type_cs_vlan_tso_len));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.outer_vlan_tag));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.tv));
- sprintf(result[j++], "%10u",
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv));
+ sprintf(result[j++], "%u",
le32_to_cpu(desc->tx.ol_type_vlan_len_msec));
sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs));
sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.mss_hw_csum));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum));
}
static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 14dc12c2155d..a3ee7875d6a7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -5203,6 +5203,13 @@ static void hns3_state_init(struct hnae3_handle *handle)
set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
}
+static void hns3_state_uninit(struct hnae3_handle *handle)
+{
+ struct hns3_nic_priv *priv = handle->priv;
+
+ clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
+}
+
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
@@ -5320,7 +5327,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
return ret;
out_reg_netdev_fail:
+ hns3_state_uninit(handle);
hns3_dbg_uninit(handle);
+ hns3_client_stop(handle);
out_client_start:
hns3_free_rx_cpu_rmap(netdev);
hns3_nic_uninit_irq(priv);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 6799d16de34b..7998ca617a92 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -94,6 +94,13 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
enum hclge_comm_cmd_status status;
struct hclge_desc desc;
+ if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "msg data length(=%u) exceeds maximum(=%u)\n",
+ msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+ return -EMSGSIZE;
+ }
+
resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
@@ -176,7 +183,7 @@ static int hclge_get_ring_chain_from_mbx(
ring_num = req->msg.ring_num;
if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
- return -ENOMEM;
+ return -EINVAL;
for (i = 0; i < ring_num; i++) {
if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
@@ -587,9 +594,9 @@ static int hclge_set_vf_mtu(struct hclge_vport *vport,
return hclge_set_vport_mtu(vport, mtu);
}
-static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- struct hclge_respond_to_vf_msg *resp_msg)
+static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back;
@@ -599,17 +606,18 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
if (queue_id >= handle->kinfo.num_tqps) {
dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
queue_id, mbx_req->mbx_src_vfid);
- return;
+ return -EINVAL;
}
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
resp_msg->len = sizeof(qid_in_pf);
+ return 0;
}
-static void hclge_get_rss_key(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- struct hclge_respond_to_vf_msg *resp_msg)
+static int hclge_get_rss_key(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RSS_MBX_RESP_LEN 8
struct hclge_dev *hdev = vport->back;
@@ -627,13 +635,14 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n",
index);
- return;
+ return -EINVAL;
}
memcpy(resp_msg->data,
&rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
+ return 0;
}
static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
@@ -809,10 +818,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"VF fail(%d) to set mtu\n", ret);
break;
case HCLGE_MBX_GET_QID_IN_PF:
- hclge_get_queue_id_in_pf(vport, req, &resp_msg);
+ ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg);
break;
case HCLGE_MBX_GET_RSS_KEY:
- hclge_get_rss_key(vport, req, &resp_msg);
+ ret = hclge_get_rss_key(vport, req, &resp_msg);
break;
case HCLGE_MBX_GET_LINK_MODE:
hclge_get_link_mode(vport, req);
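The new length check guards the copy of the message payload into the fixed-size mailbox descriptor, failing with -EMSGSIZE instead of overrunning it. The shape of the guard, with a stand-in size constant:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#define SKETCH_MBX_MAX_MSG_SIZE 14	/* stand-in for HCLGE_MBX_MAX_MSG_SIZE */

static int sketch_fill_mbx(u8 *mbx_buf, const u8 *msg, u16 msg_len)
{
	if (msg_len > SKETCH_MBX_MAX_MSG_SIZE)
		return -EMSGSIZE;	/* reject before the copy */

	memcpy(mbx_buf, msg, msg_len);
	return 0;
}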
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 77683909ca3d..5c5931dba51d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -3210,13 +3210,8 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
- ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
- ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
- } else {
- ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
- ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
- }
+ ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
+ ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
@@ -3231,23 +3226,21 @@ static int ibmvnic_set_ringparam(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int ret;
- ret = 0;
+ if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
+ ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
+ netdev_err(netdev, "Invalid request.\n");
+ netdev_err(netdev, "Max tx buffers = %llu\n",
+ adapter->max_rx_add_entries_per_subcrq);
+ netdev_err(netdev, "Max rx buffers = %llu\n",
+ adapter->max_tx_entries_per_subcrq);
+ return -EINVAL;
+ }
+
adapter->desired.rx_entries = ring->rx_pending;
adapter->desired.tx_entries = ring->tx_pending;
- ret = wait_for_reset(adapter);
-
- if (!ret &&
- (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
- adapter->req_tx_entries_per_subcrq != ring->tx_pending))
- netdev_info(netdev,
- "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
- ring->rx_pending, ring->tx_pending,
- adapter->req_rx_add_entries_per_subcrq,
- adapter->req_tx_entries_per_subcrq);
- return ret;
+ return wait_for_reset(adapter);
}
static void ibmvnic_get_channels(struct net_device *netdev,
@@ -3255,14 +3248,8 @@ static void ibmvnic_get_channels(struct net_device *netdev,
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
- channels->max_rx = adapter->max_rx_queues;
- channels->max_tx = adapter->max_tx_queues;
- } else {
- channels->max_rx = IBMVNIC_MAX_QUEUES;
- channels->max_tx = IBMVNIC_MAX_QUEUES;
- }
-
+ channels->max_rx = adapter->max_rx_queues;
+ channels->max_tx = adapter->max_tx_queues;
channels->max_other = 0;
channels->max_combined = 0;
channels->rx_count = adapter->req_rx_queues;
@@ -3275,22 +3262,11 @@ static int ibmvnic_set_channels(struct net_device *netdev,
struct ethtool_channels *channels)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int ret;
- ret = 0;
adapter->desired.rx_queues = channels->rx_count;
adapter->desired.tx_queues = channels->tx_count;
- ret = wait_for_reset(adapter);
-
- if (!ret &&
- (adapter->req_rx_queues != channels->rx_count ||
- adapter->req_tx_queues != channels->tx_count))
- netdev_info(netdev,
- "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
- channels->rx_count, channels->tx_count,
- adapter->req_rx_queues, adapter->req_tx_queues);
- return ret;
+ return wait_for_reset(adapter);
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -3298,43 +3274,32 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
struct ibmvnic_adapter *adapter = netdev_priv(dev);
int i;
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
- i++, data += ETH_GSTRING_LEN)
- memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+ if (stringset != ETH_SS_STATS)
+ return;
- for (i = 0; i < adapter->req_tx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
- data += ETH_GSTRING_LEN;
+ for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
+ memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
- snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
- data += ETH_GSTRING_LEN;
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
+ data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "tx%d_dropped_packets", i);
- data += ETH_GSTRING_LEN;
- }
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
+ data += ETH_GSTRING_LEN;
- for (i = 0; i < adapter->req_rx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
- data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
+ data += ETH_GSTRING_LEN;
+ }
- snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
- data += ETH_GSTRING_LEN;
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
+ data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
- data += ETH_GSTRING_LEN;
- }
- break;
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
+ data += ETH_GSTRING_LEN;
- case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
- strcpy(data + i * ETH_GSTRING_LEN,
- ibmvnic_priv_flags[i]);
- break;
- default:
- return;
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
+ data += ETH_GSTRING_LEN;
}
}
@@ -3347,8 +3312,6 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
return ARRAY_SIZE(ibmvnic_stats) +
adapter->req_tx_queues * NUM_TX_STATS +
adapter->req_rx_queues * NUM_RX_STATS;
- case ETH_SS_PRIV_FLAGS:
- return ARRAY_SIZE(ibmvnic_priv_flags);
default:
return -EOPNOTSUPP;
}
@@ -3401,26 +3364,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
}
}
-static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
-{
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-
- return adapter->priv_flags;
-}
-
-static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
-{
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
-
- if (which_maxes)
- adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
- else
- adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
-
- return 0;
-}
-
static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_drvinfo = ibmvnic_get_drvinfo,
.get_msglevel = ibmvnic_get_msglevel,
@@ -3434,8 +3377,6 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_sset_count = ibmvnic_get_sset_count,
.get_ethtool_stats = ibmvnic_get_ethtool_stats,
.get_link_ksettings = ibmvnic_get_link_ksettings,
- .get_priv_flags = ibmvnic_get_priv_flags,
- .set_priv_flags = ibmvnic_set_priv_flags,
};
/* Routines for managing CRQs/sCRQs */
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 8f5cefb932dd..1310c861bf83 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -41,11 +41,6 @@
#define IBMVNIC_RESET_DELAY 100
-static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
-#define IBMVNIC_USE_SERVER_MAXES 0x1
- "use-server-maxes"
-};
-
struct ibmvnic_login_buffer {
__be32 len;
__be32 version;
@@ -883,7 +878,6 @@ struct ibmvnic_adapter {
struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
dma_addr_t ip_offload_ctrl_tok;
u32 msg_enable;
- u32 priv_flags;
/* Vital Product Data (VPD) */
struct ibmvnic_vpd *vpd;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index d60e2016d03c..e6c8e6d5234f 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1009,8 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
- u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
- u16 lat_enc_d = 0; /* latency decoded */
+ u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
+ u32 lat_enc_d = 0; /* latency decoded */
u16 lat_enc = 0; /* latency encoded */
if (link) {
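The type change matters because LTR decoding expands the encoded latency well past 16 bits: assuming the standard PCIe LTR encoding used here (a 10-bit value scaled by 2^(5 * scale)), a u16 silently wraps. A worked example:

#include <stdint.h>

static uint32_t sketch_ltr_decode(uint16_t enc)
{
	uint32_t value = enc & 0x3ff;		/* bits 9:0 */
	uint32_t scale = (enc >> 10) & 0x7;	/* bits 12:10 */

	return value << (5 * scale);
}

/* For enc = (2 << 10) | 1000 the decoded latency is 1000 << 10 =
 * 1024000; stored in a u16 it would wrap to 40960, corrupting the
 * max-LTR comparison. */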
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 190590d32faf..7dfcf78b57fb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2871,7 +2871,6 @@ continue_reset:
running = adapter->state == __IAVF_RUNNING;
if (running) {
- netdev->flags &= ~IFF_UP;
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
@@ -2988,7 +2987,7 @@ continue_reset:
* to __IAVF_RUNNING
*/
iavf_up_complete(adapter);
- netdev->flags |= IFF_UP;
+
iavf_irq_enable(adapter, true);
} else {
iavf_change_state(adapter, __IAVF_DOWN);
@@ -3004,10 +3003,8 @@ continue_reset:
reset_err:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- if (running) {
+ if (running)
iavf_change_state(adapter, __IAVF_RUNNING);
- netdev->flags |= IFF_UP;
- }
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
iavf_close(netdev);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 5daade32ea62..fba178e07600 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -577,7 +577,7 @@ void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
{
struct net_device *netdev;
- if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list)
+ if (!vsi || vsi->type != ICE_VSI_PF)
return;
netdev = vsi->netdev;
@@ -599,7 +599,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
int base_idx, i;
if (!vsi || vsi->type != ICE_VSI_PF)
- return -EINVAL;
+ return 0;
pf = vsi->back;
netdev = vsi->netdev;
@@ -636,7 +636,6 @@ void ice_remove_arfs(struct ice_pf *pf)
if (!pf_vsi)
return;
- ice_free_cpu_rx_rmap(pf_vsi);
ice_clear_arfs(pf_vsi);
}
@@ -653,9 +652,5 @@ void ice_rebuild_arfs(struct ice_pf *pf)
return;
ice_remove_arfs(pf);
- if (ice_set_cpu_rx_rmap(pf_vsi)) {
- dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
- return;
- }
ice_init_arfs(pf_vsi);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 9a84d746a6c4..6a463b242c7d 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -361,7 +361,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
np = netdev_priv(netdev);
vsi = np->vsi;
- if (ice_is_reset_in_progress(vsi->back->state))
+ if (ice_is_reset_in_progress(vsi->back->state) ||
+ test_bit(ICE_VF_DIS, vsi->back->state))
return NETDEV_TX_BUSY;
repr = ice_netdev_to_repr(netdev);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index bd58d9d2e565..6a413331572b 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -52,7 +52,7 @@ static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
static inline int ice_eswitch_configure(struct ice_pf *pf)
{
- return -EOPNOTSUPP;
+ return 0;
}
static inline int ice_eswitch_rebuild(struct ice_pf *pf)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 2774cbd5b12a..6d19c58ccacd 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2689,6 +2689,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
return;
vsi->irqs_ready = false;
+ ice_free_cpu_rx_rmap(vsi);
+
ice_for_each_q_vector(vsi, i) {
u16 vector = i + base;
int irq_num;
@@ -2702,7 +2704,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
continue;
/* clear the affinity notifier in the IRQ descriptor */
- irq_set_affinity_notifier(irq_num, NULL);
+ if (!IS_ENABLED(CONFIG_RFS_ACCEL))
+ irq_set_affinity_notifier(irq_num, NULL);
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(irq_num, NULL);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index d768925785ca..9a0a358a15c2 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2510,6 +2510,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
}
+ err = ice_set_cpu_rx_rmap(vsi);
+ if (err) {
+ netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
+ vsi->vsi_num, ERR_PTR(err));
+ goto free_q_irqs;
+ }
+
vsi->irqs_ready = true;
return 0;
@@ -3692,20 +3699,12 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
*/
ice_napi_add(vsi);
- status = ice_set_cpu_rx_rmap(vsi);
- if (status) {
- dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
- vsi->vsi_num, status);
- goto unroll_napi_add;
- }
status = ice_init_mac_fltr(pf);
if (status)
- goto free_cpu_rx_map;
+ goto unroll_napi_add;
return 0;
-free_cpu_rx_map:
- ice_free_cpu_rx_rmap(vsi);
unroll_napi_add:
ice_tc_indir_block_unregister(vsi);
unroll_cfg_netdev:
@@ -5167,7 +5166,6 @@ static int __maybe_unused ice_suspend(struct device *dev)
continue;
ice_vsi_free_q_vectors(pf->vsi[v]);
}
- ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
ice_clear_interrupt_scheme(pf);
pci_save_state(pdev);
@@ -6931,12 +6929,15 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
+#define ICE_EMP_RESET_SLEEP_MS 5000
if (reset_type == ICE_RESET_EMPR) {
/* If an EMP reset has occurred, any previously pending flash
* update will have completed. We no longer know whether or
* not the NVM update EMP reset is restricted.
*/
pf->fw_emp_reset_disabled = false;
+
+ msleep(ICE_EMP_RESET_SLEEP_MS);
}
err = ice_init_all_ctrlq(hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 4eb0599714f4..13cdb5ea594d 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -641,6 +641,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0,
orom_data, hw->flash.banks.orom_size);
if (status) {
+ vfree(orom_data);
ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 8915a9d39e36..0c438219f7a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1046,8 +1046,8 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!num_vfs) {
if (!pci_vfs_assigned(pdev)) {
- ice_mbx_deinit_snapshot(&pf->hw);
ice_free_vfs(pf);
+ ice_mbx_deinit_snapshot(&pf->hw);
if (pf->lag)
ice_enable_lag(pf->lag);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 69ff4b929772..b72606c9e6d0 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -3625,6 +3625,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
return;
}
+ mutex_lock(&vf->cfg_lock);
+
/* Check if VF is disabled. */
if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
err = -EPERM;
@@ -3642,32 +3644,20 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
err = -EINVAL;
}
- if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
- ice_vc_send_msg_to_vf(vf, v_opcode,
- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
- 0);
- ice_put_vf(vf);
- return;
- }
-
error_handler:
if (err) {
ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
NULL, 0);
dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
vf_id, v_opcode, msglen, err);
- ice_put_vf(vf);
- return;
+ goto finish;
}
- /* VF is being configured in another context that triggers a VFR, so no
- * need to process this message
- */
- if (!mutex_trylock(&vf->cfg_lock)) {
- dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
- vf->vf_id);
- ice_put_vf(vf);
- return;
+ if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
+ ice_vc_send_msg_to_vf(vf, v_opcode,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
+ 0);
+ goto finish;
}
switch (v_opcode) {
@@ -3780,6 +3770,7 @@ error_handler:
vf_id, v_opcode, err);
}
+finish:
mutex_unlock(&vf->cfg_lock);
ice_put_vf(vf);
}
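The rework takes vf->cfg_lock unconditionally at entry, rather than mutex_trylock() late in the function, and routes every failure through a single finish: label so no branch can miss the unlock or the ice_put_vf() pairing. The control-flow shape, with stand-in helpers (not driver API):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

static bool sketch_opcode_allowed(int opcode)
{
	return opcode >= 0;	/* placeholder policy */
}

static int sketch_dispatch(int opcode)
{
	return 0;		/* placeholder handler */
}

static int sketch_handle_msg(struct mutex *cfg_lock, int opcode)
{
	int err;

	mutex_lock(cfg_lock);

	if (!sketch_opcode_allowed(opcode)) {
		err = -EPERM;
		goto finish;
	}

	err = sketch_dispatch(opcode);

finish:
	mutex_unlock(cfg_lock);	/* single unlock point for all paths */
	return err;
}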
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 866ee4df9671..9dd38f667059 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -415,8 +415,8 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
*/
static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
+ u32 nb_buffs_extra = 0, nb_buffs = 0;
union ice_32b_rx_flex_desc *rx_desc;
- u32 nb_buffs_extra = 0, nb_buffs;
u16 ntu = rx_ring->next_to_use;
u16 total_count = count;
struct xdp_buff **xdp;
@@ -428,6 +428,10 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
rx_desc,
rx_ring->count - ntu);
+ if (nb_buffs_extra != rx_ring->count - ntu) {
+ ntu += nb_buffs_extra;
+ goto exit;
+ }
rx_desc = ICE_RX_DESC(rx_ring, 0);
xdp = ice_xdp_buf(rx_ring, 0);
ntu = 0;
@@ -441,6 +445,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
if (ntu == rx_ring->count)
ntu = 0;
+exit:
if (rx_ring->next_to_use != ntu)
ice_release_rx_desc(rx_ring, ntu);
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index 66ea566488d1..59d5c467ea6e 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igc_get_hw_semaphore_i225(hw))
- ; /* Empty */
+ /* Releasing the resource requires first getting the HW semaphore.
+ * If we fail to get the semaphore, there is nothing we can do,
+ * except log an error and quit. We are not allowed to hang here
+ * indefinitely, as it may cause denial of service or system crash.
+ */
+ if (igc_get_hw_semaphore_i225(hw)) {
+ hw_dbg("Failed to release SW_FW_SYNC.\n");
+ return;
+ }
swfw_sync = rd32(IGC_SW_FW_SYNC);
swfw_sync &= ~mask;
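
The igc_i225.c change above turns an unbounded `while (...) ;` spin into a single attempt that logs and returns on failure. As a pattern (all names below are hypothetical), a release path should fail loudly rather than risk hanging the system:

	/* Sketch: bounded handshake in a release path; never spin forever. */
	static void release_shared_resource(struct example_hw *hw, u32 mask)
	{
		if (try_take_hw_semaphore(hw)) {	/* hypothetical helper */
			/* Hanging here could look like a denial of service. */
			pr_debug("failed to take semaphore for release\n");
			return;
		}

		clear_sync_bits(hw, mask);		/* hypothetical helper */
		put_hw_semaphore(hw);			/* hypothetical helper */
	}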
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 40dbf4b43234..6961f65d36b9 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -581,7 +581,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
- usleep_range(500, 1000);
+ udelay(50);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
@@ -638,7 +638,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
- usleep_range(500, 1000);
+ udelay(50);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 0d6e3215e98f..653e9f1e35b5 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -992,6 +992,17 @@ static void igc_ptp_time_restore(struct igc_adapter *adapter)
igc_ptp_write_i225(adapter, &ts);
}
+static void igc_ptm_stop(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ ctrl = rd32(IGC_PTM_CTRL);
+ ctrl &= ~IGC_PTM_CTRL_EN;
+
+ wr32(IGC_PTM_CTRL, ctrl);
+}
+
/**
* igc_ptp_suspend - Disable PTP work items and prepare for suspend
* @adapter: Board private structure
@@ -1009,8 +1020,10 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
- if (pci_device_is_present(adapter->pdev))
+ if (pci_device_is_present(adapter->pdev)) {
igc_ptp_time_save(adapter);
+ igc_ptm_stop(adapter);
+ }
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index e596e1a9fc75..69d11ff7677d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -903,7 +903,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
/* Tx IPsec offload doesn't seem to work on this
* device, so block these requests for now.
*/
- if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) {
+ sam->flags = sam->flags & ~XFRM_OFFLOAD_IPV6;
+ if (sam->flags != XFRM_OFFLOAD_INBOUND) {
err = -EOPNOTSUPP;
goto err_out;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 939b692ffc33..ce843ea91464 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -650,6 +650,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
return 0;
errout:
+ mutex_destroy(&mlxsw_i2c->cmd.lock);
i2c_set_clientdata(client, NULL);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index b73466470f75..fe663b0ab708 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -423,7 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
- 0, 0, parms.link, tun->fwmark, 0);
+ 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0);
rt = ip_route_output_key(tun->net, &fl4);
if (IS_ERR(rt))
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
index ce5970bdcc6a..005e56ea5da1 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -346,7 +346,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
lan966x_mac_process_raw_entry(&raw_entries[column],
mac, &vid, &dest_idx);
- WARN_ON(dest_idx > lan966x->num_phys_ports);
+ if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
+ continue;
/* If the entry in SW is found, then there is nothing
* to do
@@ -392,7 +393,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
lan966x_mac_process_raw_entry(&raw_entries[column],
mac, &vid, &dest_idx);
- WARN_ON(dest_idx > lan966x->num_phys_ports);
+ if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
+ continue;
mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
if (!mac_entry)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 1f8c67f0261b..95830e3e2b1f 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -446,6 +446,12 @@ static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port,
ANA_CPU_FWD_CFG_MLD_REDIR_ENA)))
return true;
+ if (eth_type_vlan(skb->protocol)) {
+ skb = skb_vlan_untag(skb);
+ if (unlikely(!skb))
+ return false;
+ }
+
if (skb->protocol == htons(ETH_P_IP) &&
ip_hdr(skb)->protocol == IPPROTO_IGMP)
return false;
@@ -665,6 +671,9 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
disable_irq(lan966x->ana_irq);
lan966x->ana_irq = -ENXIO;
}
+
+ if (lan966x->ptp_irq)
+ devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
}
static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index ae782778d6dd..0a1041da4384 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -29,10 +29,10 @@ enum {
static u64 lan966x_ptp_get_nominal_value(void)
{
- u64 res = 0x304d2df1;
-
- res <<= 32;
- return res;
+ /* This is the default value by which the time of day is increased on
+ * each system clock cycle. It is in 5.59 fixed-point nanosecond format.
+ */
+ return 0x304d4873ecade305;
}
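
Assuming "5.59 format" means 5 integer and 59 fractional bits of nanoseconds (an interpretation, not stated in the patch), the new constant can be sanity-checked with a few lines of user-space C:

	#include <stdio.h>

	int main(void)
	{
		/* Assumed 5.59 fixed point: increment_ns = value / 2^59. */
		unsigned long long v = 0x304d4873ecade305ULL;
		double ns = (double)v / (double)(1ULL << 59);

		/* Prints roughly 6.0377 ns per tick, i.e. a ~165.6 MHz clock. */
		printf("%.4f ns per tick (~%.1f MHz)\n", ns, 1000.0 / ns);
		return 0;
	}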
int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
index e3555c94294d..df2bee678559 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
@@ -322,8 +322,7 @@ static int lan966x_port_prechangeupper(struct net_device *dev,
if (netif_is_bridge_master(info->upper_dev) && !info->linking)
switchdev_bridge_port_unoffload(port->dev, port,
- &lan966x_switchdev_nb,
- &lan966x_switchdev_blocking_nb);
+ NULL, NULL);
return NOTIFY_DONE;
}
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index e443bd8b2d09..ca71b62a44dc 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -551,7 +551,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_vcap_filter *filter;
- int err;
+ int err = 0;
u32 val;
list_for_each_entry(filter, &block->rules, list) {
@@ -570,7 +570,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
if (vlan_aware)
err = ocelot_del_vlan_unaware_pvid(ocelot, port,
ocelot_port->bridge);
- else
+ else if (ocelot_port->bridge)
err = ocelot_add_vlan_unaware_pvid(ocelot, port,
ocelot_port->bridge);
if (err)
@@ -629,6 +629,13 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
{
int err;
+ /* Ignore VID 0 added to our RX filter by the 8021q module, since
+ * that collides with OCELOT_STANDALONE_PVID and changes it from
+ * egress-untagged to egress-tagged.
+ */
+ if (!vid)
+ return 0;
+
err = ocelot_vlan_member_add(ocelot, port, vid, untagged);
if (err)
return err;
@@ -651,6 +658,9 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
bool del_pvid = false;
int err;
+ if (!vid)
+ return 0;
+
if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
del_pvid = true;
@@ -2859,6 +2869,8 @@ static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
val = BIT(port);
ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4);
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6);
}
static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
index cd478d2cd871..00f6d347eaf7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -57,10 +57,6 @@
#define TSE_PCS_USE_SGMII_ENA BIT(0)
#define TSE_PCS_IF_USE_SGMII 0x03
-#define SGMII_ADAPTER_CTRL_REG 0x00
-#define SGMII_ADAPTER_DISABLE 0x0001
-#define SGMII_ADAPTER_ENABLE 0x0000
-
#define AUTONEGO_LINK_TIMER 20
static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs)
@@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
unsigned int speed)
{
void __iomem *tse_pcs_base = pcs->tse_pcs_base;
- void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
u32 val;
- writew(SGMII_ADAPTER_ENABLE,
- sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
-
pcs->autoneg = phy_dev->autoneg;
if (phy_dev->autoneg == AUTONEG_ENABLE) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
index 442812c0a4bd..694ac25ef426 100644
--- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
@@ -10,6 +10,10 @@
#include <linux/phy.h>
#include <linux/timer.h>
+#define SGMII_ADAPTER_CTRL_REG 0x00
+#define SGMII_ADAPTER_ENABLE 0x0000
+#define SGMII_ADAPTER_DISABLE 0x0001
+
struct tse_pcs {
struct device *dev;
void __iomem *tse_pcs_base;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index b7c2579c963b..6b447d8f0bd8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -18,9 +18,6 @@
#include "altr_tse_pcs.h"
-#define SGMII_ADAPTER_CTRL_REG 0x00
-#define SGMII_ADAPTER_DISABLE 0x0001
-
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
@@ -62,14 +59,13 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
{
struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
void __iomem *splitter_base = dwmac->splitter_base;
- void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base;
void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base;
struct device *dev = dwmac->dev;
struct net_device *ndev = dev_get_drvdata(dev);
struct phy_device *phy_dev = ndev->phydev;
u32 val;
- if ((tse_pcs_base) && (sgmii_adapter_base))
+ if (sgmii_adapter_base)
writew(SGMII_ADAPTER_DISABLE,
sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
@@ -93,8 +89,11 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
}
- if (tse_pcs_base && sgmii_adapter_base)
+ if (phy_dev && sgmii_adapter_base) {
+ writew(SGMII_ADAPTER_ENABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
+ }
}
static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 22fea0f67245..92d32940aff0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -71,9 +71,9 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
writel(value, ioaddr + PTP_TCR);
/* wait for present system time initialize to complete */
- return readl_poll_timeout(ioaddr + PTP_TCR, value,
+ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
!(value & PTP_TCR_TSINIT),
- 10000, 100000);
+ 10, 100000);
}
static int config_addend(void __iomem *ioaddr, u32 addend)
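
The stmmac hunk above matters because init_systime() can run in atomic context: readl_poll_timeout() sleeps between reads, while readl_poll_timeout_atomic() busy-waits with udelay(). A minimal usage sketch (the register and bit are illustrative):

	#include <linux/iopoll.h>

	/* Sketch: polling that is safe with a spinlock held. */
	static int wait_bit_clear_atomic(void __iomem *reg)
	{
		u32 value;

		/* Re-read every 10 us, time out after 100 ms. */
		return readl_poll_timeout_atomic(reg, value, !(value & BIT(0)),
						 10, 100000);
	}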
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 16105292b140..74e845fa2e07 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1355,7 +1355,9 @@ static int rr_close(struct net_device *dev)
rrpriv->fw_running = 0;
+ spin_unlock_irqrestore(&rrpriv->lock, flags);
del_timer_sync(&rrpriv->timer);
+ spin_lock_irqsave(&rrpriv->lock, flags);
writel(0, &regs->TxPi);
writel(0, &regs->IpRxPi);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 069e8824c264..b00bc8173abe 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -460,8 +460,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_CONSUMED;
*pskb = skb;
eth = eth_hdr(skb);
- if (macvlan_forward_source(skb, port, eth->h_source))
+ if (macvlan_forward_source(skb, port, eth->h_source)) {
+ kfree_skb(skb);
return RX_HANDLER_CONSUMED;
+ }
src = macvlan_hash_lookup(port, eth->h_source);
if (src && src->mode != MACVLAN_MODE_VEPA &&
src->mode != MACVLAN_MODE_BRIDGE) {
@@ -480,8 +482,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
return RX_HANDLER_PASS;
}
- if (macvlan_forward_source(skb, port, eth->h_source))
+ if (macvlan_forward_source(skb, port, eth->h_source)) {
+ kfree_skb(skb);
return RX_HANDLER_CONSUMED;
+ }
if (macvlan_passthru(port))
vlan = list_first_or_null_rcu(&port->vlans,
struct macvlan_dev, list);
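
The macvlan fix above follows from the rx_handler contract: returning RX_HANDLER_CONSUMED transfers skb ownership to the handler, which must then free it. Sketched with a hypothetical predicate:

	/* Sketch: RX_HANDLER_CONSUMED means "I own this skb now", so free it. */
	static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
	{
		struct sk_buff *skb = *pskb;

		if (should_drop_frame(skb)) {		/* hypothetical predicate */
			kfree_skb(skb);			/* consumed => we free it */
			return RX_HANDLER_CONSUMED;
		}

		return RX_HANDLER_PASS;			/* the stack keeps ownership */
	}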
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index 1becb1a731f6..1c1584fca632 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -43,6 +43,11 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
int rc;
rc = fwnode_irq_get(child, 0);
+ /* Don't wait forever if the IRQ provider doesn't become available;
+ * just fall back to poll mode.
+ */
+ if (rc == -EPROBE_DEFER)
+ rc = driver_deferred_probe_check_state(&phy->mdio.dev);
if (rc == -EPROBE_DEFER)
return rc;
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index b6fea119fe13..2b7d0720720b 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -880,7 +880,7 @@ static int mv3310_read_status_copper(struct phy_device *phydev)
cssr1 = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_CSSR1);
if (cssr1 < 0)
- return val;
+ return cssr1;
/* If the link settings are not resolved, mark the link down */
if (!(cssr1 & MV_PCS_CSSR1_RESOLVED)) {
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index 389df3f4293c..c2c0e361fd3d 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -706,7 +706,6 @@ static int lan87xx_read_status(struct phy_device *phydev)
static int lan87xx_config_aneg(struct phy_device *phydev)
{
u16 ctl = 0;
- int rc;
switch (phydev->master_slave_set) {
case MASTER_SLAVE_CFG_MASTER_FORCE:
@@ -722,11 +721,7 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
return -EOPNOTSUPP;
}
- rc = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
- if (rc == 1)
- rc = genphy_soft_reset(phydev);
-
- return rc;
+ return phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
}
static struct phy_driver microchip_t1_phy_driver[] = {
@@ -748,6 +743,7 @@ static struct phy_driver microchip_t1_phy_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_LAN937X),
.name = "Microchip LAN937x T1",
+ .flags = PHY_POLL_CABLE_TEST,
.features = PHY_BASIC_T1_FEATURES,
.config_init = lan87xx_config_init,
.suspend = genphy_suspend,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 276a0e42ca8e..dbe4c0a4be2c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1124,7 +1124,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* NETIF_F_LLTX requires to do our own update of trans_start */
queue = netdev_get_tx_queue(dev, txq);
- queue->trans_start = jiffies;
+ txq_trans_cond_update(queue);
/* Notify and wake up reader process */
if (tfile->flags & TUN_FASYNC)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 1b5714926d81..eb0121a64d6d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -320,7 +320,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
rcv = rcu_dereference(priv->peer);
- if (unlikely(!rcv)) {
+ if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
kfree_skb(skb);
goto drop;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 87838cbe38cf..cbba9d2e8f32 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1005,6 +1005,24 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
* xdp.data_meta were adjusted
*/
len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
+
+ /* Recalculate the headroom if xdp.data or xdp.data_meta were
+ * adjusted. Note that offset should always point to the start of
+ * the bytes reserved for the virtio_net header, which are followed
+ * by xdp.data; that means offset equals the headroom (when buf
+ * starts at the beginning of the page, otherwise there is a base
+ * offset inside the page), but it is used with a different starting
+ * point (buf start) than xdp.data (buf start + vnet hdr size). If
+ * the XDP program adjusted xdp.data or data_meta, the headroom size
+ * has changed and so has the offset; we can use data_hard_start,
+ * which points at buf start + vnet hdr size, to calculate the new
+ * headroom and use it later to compute buf start in page_to_skb().
+ */
+ headroom = xdp.data - xdp.data_hard_start - metasize;
+
/* We can only create skb based on xdp_page. */
if (unlikely(xdp_page != page)) {
rcu_read_unlock();
@@ -1012,7 +1030,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
head_skb = page_to_skb(vi, rq, xdp_page, offset,
len, PAGE_SIZE, false,
metasize,
- VIRTIO_XDP_HEADROOM);
+ headroom);
return head_skb;
}
break;
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index de97ff98d36e..8a5e3a6d32d7 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -651,11 +651,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
if (rd == NULL)
- return -ENOBUFS;
+ return -ENOMEM;
if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
kfree(rd);
- return -ENOBUFS;
+ return -ENOMEM;
}
rd->remote_ip = *ip;
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 23d2954d9747..1e5672019922 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -349,7 +349,7 @@ static int __init cosa_init(void)
}
} else {
cosa_major = register_chrdev(0, "cosa", &cosa_fops);
- if (!cosa_major) {
+ if (cosa_major < 0) {
pr_warn("unable to register chardev\n");
err = -EIO;
goto out;
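
The cosa.c fix reflects register_chrdev() semantics: when called with a major of 0 it returns the dynamically allocated major (a positive number) on success and a negative errno on failure, so `!cosa_major` could never detect an error. The corrected idiom (example_fops is a placeholder):

	/* Sketch: dynamic major allocation; only negative returns are errors. */
	static int example_chrdev_register(void)
	{
		int major = register_chrdev(0, "example", &example_fops);

		if (major < 0)
			return major;		/* negative errno */

		pr_info("example: using dynamic major %d\n", major);
		return 0;
	}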
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 0fad1331303c..aa9a7a5970fd 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -19,6 +19,7 @@
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/suspend.h>
+#include <net/dst_metadata.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/ip_tunnels.h>
@@ -167,7 +168,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
goto err_peer;
}
- mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
+ mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
__skb_queue_head_init(&packets);
if (!skb_is_gso(skb)) {
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index d5b83f90d27a..e6b34b0d61bd 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -3136,6 +3136,20 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
arvif->do_not_send_tmpl = true;
else
arvif->do_not_send_tmpl = false;
+
+ if (vif->bss_conf.he_support) {
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_BA_MODE,
+ WMI_BA_MODE_BUFFER_SIZE_256);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set BA BUFFER SIZE 256 for VDEV: %d\n",
+ arvif->vdev_id);
+ }
}
if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
@@ -3171,14 +3185,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (arvif->is_up && vif->bss_conf.he_support &&
vif->bss_conf.he_oper.params) {
- ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- WMI_VDEV_PARAM_BA_MODE,
- WMI_BA_MODE_BUFFER_SIZE_256);
- if (ret)
- ath11k_warn(ar->ab,
- "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
- arvif->vdev_id);
-
param_id = WMI_VDEV_PARAM_HEOPS_0_31;
param_value = vif->bss_conf.he_oper.params;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 98090e40e1cf..e2791d45f5f5 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
continue;
txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
- fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
+ fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0];
if (fi->keyix == keyix)
return true;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index d0caf1de2bde..db83cc4ba810 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
BUILD_BUG_ON(sizeof(struct ath_frame_info) >
- sizeof(tx_info->rate_driver_data));
- return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
+ sizeof(tx_info->status.status_driver_data));
+ return (struct ath_frame_info *) &tx_info->status.status_driver_data[0];
}
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
@@ -2542,6 +2542,16 @@ skip_tx_complete:
spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
+static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info)
+{
+ void *ptr = &tx_info->status;
+
+ memset(ptr + sizeof(tx_info->status.rates), 0,
+ sizeof(tx_info->status) -
+ sizeof(tx_info->status.rates) -
+ sizeof(tx_info->status.status_driver_data));
+}
+
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, int nframes, int nbad,
int txok)
@@ -2553,6 +2563,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
struct ath_hw *ah = sc->sc_ah;
u8 i, tx_rateindex;
+ ath_clear_tx_status(tx_info);
+
if (txok)
tx_info->status.ack_signal = ts->ts_rssi;
@@ -2567,6 +2579,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
tx_info->status.ampdu_len = nframes;
tx_info->status.ampdu_ack_len = nframes - nbad;
+ tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
+
+ for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
+ tx_info->status.rates[i].count = 0;
+ tx_info->status.rates[i].idx = -1;
+ }
+
if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
(tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
/*
@@ -2588,16 +2607,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
tx_info->status.rates[tx_rateindex].count =
hw->max_rate_tries;
}
-
- for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
- tx_info->status.rates[i].count = 0;
- tx_info->status.rates[i].idx = -1;
- }
-
- tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
-
- /* we report airtime in ath_tx_count_airtime(), don't report twice */
- tx_info->status.tx_time = 0;
}
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 55285cad527f..212fbbe1cd7e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_SUB,
};
-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu))
/* SDIO Pad drive strength to select value mappings */
struct sdiod_drive_str {
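
The SDIOD_DRVSTR_KEY change avoids undefined behaviour: `(chip) << 16` promotes chip to signed int, and a chip ID with bit 15 set would shift a 1 into the sign bit. A reduced illustration of the hazard and the fix:

	/* Sketch: left-shifting into the sign bit of an int is UB; cast first. */
	#define KEY_BAD(chip, pmu) (((chip) << 16) | (pmu))
	#define KEY_OK(chip, pmu)  (((unsigned int)(chip) << 16) | (pmu))

	/* KEY_BAD(0x8000, 0) shifts a 1 into bit 31 of a signed int: undefined.
	 * KEY_OK(0x8000, 0) is well-defined unsigned arithmetic.
	 */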
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 8a22ee581674..df85ebc6e1df 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
/* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
- mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
+ mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index efb85c6d8e2d..e1846d04817f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -366,7 +366,7 @@ static inline void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
- if (unlikely(nvme_req(req)->status != NVME_SC_SUCCESS))
+ if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
nvme_log_error(req);
nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
@@ -1015,6 +1015,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
+ req->rq_flags |= RQF_QUIET;
ret = nvme_execute_rq(req, at_head);
if (result && ret >= 0)
*result = nvme_req(req)->result;
@@ -1287,6 +1288,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
warn_str, cur->nidl);
return -1;
}
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_EUI64_LEN;
memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
return NVME_NIDT_EUI64_LEN;
case NVME_NIDT_NGUID:
@@ -1295,6 +1298,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
warn_str, cur->nidl);
return -1;
}
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_NGUID_LEN;
memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
return NVME_NIDT_NGUID_LEN;
case NVME_NIDT_UUID:
@@ -1303,6 +1308,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
warn_str, cur->nidl);
return -1;
}
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+ return NVME_NIDT_UUID_LEN;
uuid_copy(&ids->uuid, data + sizeof(*cur));
return NVME_NIDT_UUID_LEN;
case NVME_NIDT_CSI:
@@ -1399,12 +1406,18 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if ((*id)->ncap == 0) /* namespace not allocated or attached */
goto out_free_id;
- if (ctrl->vs >= NVME_VS(1, 1, 0) &&
- !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
- memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
- if (ctrl->vs >= NVME_VS(1, 2, 0) &&
- !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
- memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+
+ if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
+ dev_info(ctrl->device,
+ "Ignoring bogus Namespace Identifiers\n");
+ } else {
+ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+ !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+ memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+ if (ctrl->vs >= NVME_VS(1, 2, 0) &&
+ !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+ memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+ }
return 0;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 1393bbf82d71..a2b53ca63335 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -144,6 +144,11 @@ enum nvme_quirks {
* encoding the generation sequence number.
*/
NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
+
+ /*
+ * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
+ */
+ NVME_QUIRK_BOGUS_NID = (1 << 18),
};
/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d817ca17463e..3aacf1c0d5a5 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3409,7 +3409,10 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS |
- NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ NVME_QUIRK_DISABLE_WRITE_ZEROES |
+ NVME_QUIRK_BOGUS_NID, },
+ { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
{ PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
@@ -3447,6 +3450,10 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+ { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 9694370651fa..59d3980b8ca2 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -400,6 +400,9 @@ validate_group(struct perf_event *event)
if (!validate_event(event->pmu, &fake_pmu, leader))
return -EINVAL;
+ if (event == leader)
+ return 0;
+
for_each_sibling_event(sibling, leader) {
if (!validate_event(event->pmu, &fake_pmu, sibling))
return -EINVAL;
@@ -489,12 +492,7 @@ __hw_perf_event_init(struct perf_event *event)
local64_set(&hwc->period_left, hwc->sample_period);
}
- if (event->group_leader != event) {
- if (validate_group(event) != 0)
- return -EINVAL;
- }
-
- return 0;
+ return validate_group(event);
}
static int armpmu_event_init(struct perf_event *event)
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
index 5b471ab80fe2..54d65a6f0fcc 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
@@ -414,19 +414,19 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev)
ret = clk_prepare_enable(priv->clk_ref);
if (ret)
- goto err_disable_clk_ref;
+ return ret;
priv->reset = devm_reset_control_array_get_exclusive(dev);
- if (IS_ERR(priv->reset))
- return PTR_ERR(priv->reset);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ goto err_disable_clk_ref;
+ }
priv->phy = devm_phy_create(dev, np, &phy_g12a_usb3_pcie_ops);
if (IS_ERR(priv->phy)) {
ret = PTR_ERR(priv->phy);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to create PHY\n");
-
- return ret;
+ dev_err_probe(dev, ret, "failed to create PHY\n");
+ goto err_disable_clk_ref;
}
phy_set_drvdata(priv->phy, priv);
@@ -434,8 +434,12 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev)
phy_provider = devm_of_phy_provider_register(dev,
phy_g12a_usb3_pcie_xlate);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ goto err_disable_clk_ref;
+ }
- return PTR_ERR_OR_ZERO(phy_provider);
+ return 0;
err_disable_clk_ref:
clk_disable_unprepare(priv->clk_ref);
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 5172971f4c36..3cd4d51c247c 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -629,7 +629,8 @@ idle:
cleanup:
if (error < 0)
phy_mdm6600_device_power_off(ddata);
-
+ pm_runtime_disable(ddata->dev);
+ pm_runtime_dont_use_autosuspend(ddata->dev);
return error;
}
diff --git a/drivers/phy/samsung/phy-exynos5250-sata.c b/drivers/phy/samsung/phy-exynos5250-sata.c
index 9ec234243f7c..595adba5fb8f 100644
--- a/drivers/phy/samsung/phy-exynos5250-sata.c
+++ b/drivers/phy/samsung/phy-exynos5250-sata.c
@@ -187,6 +187,7 @@ static int exynos_sata_phy_probe(struct platform_device *pdev)
return -EINVAL;
sata_phy->client = of_find_i2c_device_by_node(node);
+ of_node_put(node);
if (!sata_phy->client)
return -EPROBE_DEFER;
@@ -195,20 +196,21 @@ static int exynos_sata_phy_probe(struct platform_device *pdev)
sata_phy->phyclk = devm_clk_get(dev, "sata_phyctrl");
if (IS_ERR(sata_phy->phyclk)) {
dev_err(dev, "failed to get clk for PHY\n");
- return PTR_ERR(sata_phy->phyclk);
+ ret = PTR_ERR(sata_phy->phyclk);
+ goto put_dev;
}
ret = clk_prepare_enable(sata_phy->phyclk);
if (ret < 0) {
dev_err(dev, "failed to enable source clk\n");
- return ret;
+ goto put_dev;
}
sata_phy->phy = devm_phy_create(dev, NULL, &exynos_sata_phy_ops);
if (IS_ERR(sata_phy->phy)) {
- clk_disable_unprepare(sata_phy->phyclk);
dev_err(dev, "failed to create PHY\n");
- return PTR_ERR(sata_phy->phy);
+ ret = PTR_ERR(sata_phy->phy);
+ goto clk_disable;
}
phy_set_drvdata(sata_phy->phy, sata_phy);
@@ -216,11 +218,18 @@ static int exynos_sata_phy_probe(struct platform_device *pdev)
phy_provider = devm_of_phy_provider_register(dev,
of_phy_simple_xlate);
if (IS_ERR(phy_provider)) {
- clk_disable_unprepare(sata_phy->phyclk);
- return PTR_ERR(phy_provider);
+ ret = PTR_ERR(phy_provider);
+ goto clk_disable;
}
return 0;
+
+clk_disable:
+ clk_disable_unprepare(sata_phy->phyclk);
+put_dev:
+ put_device(&sata_phy->client->dev);
+
+ return ret;
}
static const struct of_device_id exynos_sata_phy_of_match[] = {
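
The exynos5250-sata rework replaces three duplicated clk_disable_unprepare() calls with one goto ladder and adds the put_device() owed for the reference taken by of_find_i2c_device_by_node(). The general unwind shape, with hypothetical resources:

	/* Sketch: acquire in order, unwind in reverse via labels. */
	static int example_probe(struct device *dev)
	{
		int ret;

		ret = take_resource_a(dev);	/* hypothetical */
		if (ret)
			return ret;

		ret = take_resource_b(dev);	/* hypothetical */
		if (ret)
			goto put_a;

		ret = take_resource_c(dev);	/* hypothetical */
		if (ret)
			goto put_b;

		return 0;

	put_b:
		release_resource_b(dev);
	put_a:
		release_resource_a(dev);
		return ret;
	}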
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index c1211c4f863c..0be727bb9f79 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -838,7 +838,7 @@ static int serdes_am654_probe(struct platform_device *pdev)
clk_err:
of_clk_del_provider(node);
-
+ pm_runtime_disable(dev);
return ret;
}
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index 3a505fe5715a..31a775877f6e 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -215,7 +215,7 @@ static int omap_usb2_enable_clocks(struct omap_usb *phy)
return 0;
err1:
- clk_disable(phy->wkupclk);
+ clk_disable_unprepare(phy->wkupclk);
err0:
return ret;
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index 2cbc91e535d4..f502c36f3be5 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -696,6 +696,7 @@ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy)
}
control_pdev = of_find_device_by_node(control_node);
+ of_node_put(control_node);
if (!control_pdev) {
dev_err(dev, "Failed to get control device\n");
return -EINVAL;
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index a0cdbcadf09e..c3ab4b69ea68 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -155,7 +155,7 @@ static int tusb1210_set_mode(struct phy *phy, enum phy_mode mode, int submode)
}
#ifdef CONFIG_POWER_SUPPLY
-const char * const tusb1210_chg_det_states[] = {
+static const char * const tusb1210_chg_det_states[] = {
"CHG_DET_CONNECTING",
"CHG_DET_START_DET",
"CHG_DET_READ_DET",
@@ -537,12 +537,18 @@ static int tusb1210_probe(struct ulpi *ulpi)
tusb1210_probe_charger_detect(tusb);
tusb->phy = ulpi_phy_create(ulpi, &phy_ops);
- if (IS_ERR(tusb->phy))
- return PTR_ERR(tusb->phy);
+ if (IS_ERR(tusb->phy)) {
+ ret = PTR_ERR(tusb->phy);
+ goto err_remove_charger;
+ }
phy_set_drvdata(tusb->phy, tusb);
ulpi_set_drvdata(ulpi, tusb);
return 0;
+
+err_remove_charger:
+ tusb1210_remove_charger_detect(tusb);
+ return ret;
}
static void tusb1210_remove(struct ulpi *ulpi)
diff --git a/drivers/pinctrl/intel/pinctrl-alderlake.c b/drivers/pinctrl/intel/pinctrl-alderlake.c
index 32ba50efbceb..62dbd1e67513 100644
--- a/drivers/pinctrl/intel/pinctrl-alderlake.c
+++ b/drivers/pinctrl/intel/pinctrl-alderlake.c
@@ -14,11 +14,17 @@
#include "pinctrl-intel.h"
-#define ADL_PAD_OWN 0x0a0
-#define ADL_PADCFGLOCK 0x110
-#define ADL_HOSTSW_OWN 0x150
-#define ADL_GPI_IS 0x200
-#define ADL_GPI_IE 0x220
+#define ADL_N_PAD_OWN 0x020
+#define ADL_N_PADCFGLOCK 0x080
+#define ADL_N_HOSTSW_OWN 0x0b0
+#define ADL_N_GPI_IS 0x100
+#define ADL_N_GPI_IE 0x120
+
+#define ADL_S_PAD_OWN 0x0a0
+#define ADL_S_PADCFGLOCK 0x110
+#define ADL_S_HOSTSW_OWN 0x150
+#define ADL_S_GPI_IS 0x200
+#define ADL_S_GPI_IE 0x220
#define ADL_GPP(r, s, e, g) \
{ \
@@ -28,14 +34,28 @@
.gpio_base = (g), \
}
-#define ADL_COMMUNITY(b, s, e, g) \
+#define ADL_N_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = ADL_N_PAD_OWN, \
+ .padcfglock_offset = ADL_N_PADCFGLOCK, \
+ .hostown_offset = ADL_N_HOSTSW_OWN, \
+ .is_offset = ADL_N_GPI_IS, \
+ .ie_offset = ADL_N_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+#define ADL_S_COMMUNITY(b, s, e, g) \
{ \
.barno = (b), \
- .padown_offset = ADL_PAD_OWN, \
- .padcfglock_offset = ADL_PADCFGLOCK, \
- .hostown_offset = ADL_HOSTSW_OWN, \
- .is_offset = ADL_GPI_IS, \
- .ie_offset = ADL_GPI_IE, \
+ .padown_offset = ADL_S_PAD_OWN, \
+ .padcfglock_offset = ADL_S_PADCFGLOCK, \
+ .hostown_offset = ADL_S_HOSTSW_OWN, \
+ .is_offset = ADL_S_GPI_IS, \
+ .ie_offset = ADL_S_GPI_IE, \
.pin_base = (s), \
.npins = ((e) - (s) + 1), \
.gpps = (g), \
@@ -342,10 +362,10 @@ static const struct intel_padgroup adln_community5_gpps[] = {
};
static const struct intel_community adln_communities[] = {
- ADL_COMMUNITY(0, 0, 66, adln_community0_gpps),
- ADL_COMMUNITY(1, 67, 168, adln_community1_gpps),
- ADL_COMMUNITY(2, 169, 248, adln_community4_gpps),
- ADL_COMMUNITY(3, 249, 256, adln_community5_gpps),
+ ADL_N_COMMUNITY(0, 0, 66, adln_community0_gpps),
+ ADL_N_COMMUNITY(1, 67, 168, adln_community1_gpps),
+ ADL_N_COMMUNITY(2, 169, 248, adln_community4_gpps),
+ ADL_N_COMMUNITY(3, 249, 256, adln_community5_gpps),
};
static const struct intel_pinctrl_soc_data adln_soc_data = {
@@ -713,11 +733,11 @@ static const struct intel_padgroup adls_community5_gpps[] = {
};
static const struct intel_community adls_communities[] = {
- ADL_COMMUNITY(0, 0, 94, adls_community0_gpps),
- ADL_COMMUNITY(1, 95, 150, adls_community1_gpps),
- ADL_COMMUNITY(2, 151, 199, adls_community3_gpps),
- ADL_COMMUNITY(3, 200, 269, adls_community4_gpps),
- ADL_COMMUNITY(4, 270, 303, adls_community5_gpps),
+ ADL_S_COMMUNITY(0, 0, 94, adls_community0_gpps),
+ ADL_S_COMMUNITY(1, 95, 150, adls_community1_gpps),
+ ADL_S_COMMUNITY(2, 151, 199, adls_community3_gpps),
+ ADL_S_COMMUNITY(3, 200, 269, adls_community4_gpps),
+ ADL_S_COMMUNITY(4, 270, 303, adls_community5_gpps),
};
static const struct intel_pinctrl_soc_data adls_soc_data = {
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 8dca1ef04965..40accd110c3d 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -30,6 +30,7 @@ config PINCTRL_MTK_MOORE
select GENERIC_PINMUX_FUNCTIONS
select GPIOLIB
select OF_GPIO
+ select EINT_MTK
select PINCTRL_MTK_V2
config PINCTRL_MTK_PARIS
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 8d271c6b0ca4..5de691c630b4 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1374,10 +1374,10 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
}
irq = irq_of_parse_and_map(child, 0);
- if (irq < 0) {
- dev_err(pctl->dev, "No IRQ for bank %u: %d\n", i, irq);
+ if (!irq) {
+ dev_err(pctl->dev, "No IRQ for bank %u\n", i);
of_node_put(child);
- ret = irq;
+ ret = -EINVAL;
goto err;
}
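
The pistachio fix reflects the return convention of irq_of_parse_and_map(): it yields an unsigned virq number, with 0 meaning failure, so the old `irq < 0` test was dead code. Contrast the two common lookup APIs (np and pdev are placeholders):

	/* Sketch: two IRQ lookup APIs with different failure conventions. */
	unsigned int virq = irq_of_parse_and_map(np, 0);
	if (!virq)		/* returns 0 on failure, never negative */
		return -EINVAL;

	int irq = platform_get_irq(pdev, 0);
	if (irq < 0)		/* returns a negative errno on failure */
		return irq;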
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index a1b598b86aa9..2cb79e649fcf 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -457,95 +457,110 @@ static struct rockchip_mux_recalced_data rk3128_mux_recalced_data[] = {
static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = {
{
+ /* gpio1b6_sel */
.num = 1,
.pin = 14,
.reg = 0x28,
.bit = 12,
.mask = 0xf
}, {
+ /* gpio1b7_sel */
.num = 1,
.pin = 15,
.reg = 0x2c,
.bit = 0,
.mask = 0x3
}, {
+ /* gpio1c2_sel */
.num = 1,
.pin = 18,
.reg = 0x30,
.bit = 4,
.mask = 0xf
}, {
+ /* gpio1c3_sel */
.num = 1,
.pin = 19,
.reg = 0x30,
.bit = 8,
.mask = 0xf
}, {
+ /* gpio1c4_sel */
.num = 1,
.pin = 20,
.reg = 0x30,
.bit = 12,
.mask = 0xf
}, {
+ /* gpio1c5_sel */
.num = 1,
.pin = 21,
.reg = 0x34,
.bit = 0,
.mask = 0xf
}, {
+ /* gpio1c6_sel */
.num = 1,
.pin = 22,
.reg = 0x34,
.bit = 4,
.mask = 0xf
}, {
+ /* gpio1c7_sel */
.num = 1,
.pin = 23,
.reg = 0x34,
.bit = 8,
.mask = 0xf
}, {
- .num = 3,
- .pin = 12,
- .reg = 0x68,
- .bit = 8,
- .mask = 0xf
- }, {
- .num = 3,
- .pin = 13,
- .reg = 0x68,
- .bit = 12,
- .mask = 0xf
- }, {
+ /* gpio2a2_sel */
.num = 2,
.pin = 2,
- .reg = 0x608,
- .bit = 0,
- .mask = 0x7
+ .reg = 0x40,
+ .bit = 4,
+ .mask = 0x3
}, {
+ /* gpio2a3_sel */
.num = 2,
.pin = 3,
- .reg = 0x608,
- .bit = 4,
- .mask = 0x7
+ .reg = 0x40,
+ .bit = 6,
+ .mask = 0x3
}, {
+ /* gpio2c0_sel */
.num = 2,
.pin = 16,
- .reg = 0x610,
- .bit = 8,
- .mask = 0x7
+ .reg = 0x50,
+ .bit = 0,
+ .mask = 0x3
}, {
+ /* gpio3b2_sel */
.num = 3,
.pin = 10,
- .reg = 0x610,
- .bit = 0,
- .mask = 0x7
+ .reg = 0x68,
+ .bit = 4,
+ .mask = 0x3
}, {
+ /* gpio3b3_sel */
.num = 3,
.pin = 11,
- .reg = 0x610,
- .bit = 4,
- .mask = 0x7
+ .reg = 0x68,
+ .bit = 6,
+ .mask = 0x3
+ }, {
+ /* gpio3b4_sel */
+ .num = 3,
+ .pin = 12,
+ .reg = 0x68,
+ .bit = 8,
+ .mask = 0xf
+ }, {
+ /* gpio3b5_sel */
+ .num = 3,
+ .pin = 13,
+ .reg = 0x68,
+ .bit = 12,
+ .mask = 0xf
},
};
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6350.c b/drivers/pinctrl/qcom/pinctrl-sm6350.c
index 4d37b817b232..a91a86628f2f 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6350.c
@@ -264,14 +264,14 @@ static const struct pinctrl_pin_desc sm6350_pins[] = {
PINCTRL_PIN(153, "GPIO_153"),
PINCTRL_PIN(154, "GPIO_154"),
PINCTRL_PIN(155, "GPIO_155"),
- PINCTRL_PIN(156, "SDC1_RCLK"),
- PINCTRL_PIN(157, "SDC1_CLK"),
- PINCTRL_PIN(158, "SDC1_CMD"),
- PINCTRL_PIN(159, "SDC1_DATA"),
- PINCTRL_PIN(160, "SDC2_CLK"),
- PINCTRL_PIN(161, "SDC2_CMD"),
- PINCTRL_PIN(162, "SDC2_DATA"),
- PINCTRL_PIN(163, "UFS_RESET"),
+ PINCTRL_PIN(156, "UFS_RESET"),
+ PINCTRL_PIN(157, "SDC1_RCLK"),
+ PINCTRL_PIN(158, "SDC1_CLK"),
+ PINCTRL_PIN(159, "SDC1_CMD"),
+ PINCTRL_PIN(160, "SDC1_DATA"),
+ PINCTRL_PIN(161, "SDC2_CLK"),
+ PINCTRL_PIN(162, "SDC2_CMD"),
+ PINCTRL_PIN(163, "SDC2_DATA"),
};
#define DECLARE_MSM_GPIO_PINS(pin) \
diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
index dfd805e76862..7b0576f71376 100644
--- a/drivers/pinctrl/samsung/Kconfig
+++ b/drivers/pinctrl/samsung/Kconfig
@@ -4,14 +4,13 @@
#
config PINCTRL_SAMSUNG
bool
- depends on OF_GPIO
+ select GPIOLIB
select PINMUX
select PINCONF
config PINCTRL_EXYNOS
bool "Pinctrl common driver part for Samsung Exynos SoCs"
- depends on OF_GPIO
- depends on ARCH_EXYNOS || ARCH_S5PV210 || COMPILE_TEST
+ depends on ARCH_EXYNOS || ARCH_S5PV210 || (COMPILE_TEST && OF)
select PINCTRL_SAMSUNG
select PINCTRL_EXYNOS_ARM if ARM && (ARCH_EXYNOS || ARCH_S5PV210)
select PINCTRL_EXYNOS_ARM64 if ARM64 && ARCH_EXYNOS
@@ -26,12 +25,10 @@ config PINCTRL_EXYNOS_ARM64
config PINCTRL_S3C24XX
bool "Samsung S3C24XX SoC pinctrl driver"
- depends on OF_GPIO
- depends on ARCH_S3C24XX || COMPILE_TEST
+ depends on ARCH_S3C24XX || (COMPILE_TEST && OF)
select PINCTRL_SAMSUNG
config PINCTRL_S3C64XX
bool "Samsung S3C64XX SoC pinctrl driver"
- depends on OF_GPIO
- depends on ARCH_S3C64XX || COMPILE_TEST
+ depends on ARCH_S3C64XX || (COMPILE_TEST && OF)
select PINCTRL_SAMSUNG
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index d291819c2f77..cb965cf93705 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -770,7 +770,7 @@ static const struct samsung_pin_bank_data fsd_pin_banks2[] __initconst = {
EXYNOS850_PIN_BANK_EINTN(3, 0x00, "gpq0"),
};
-const struct samsung_pin_ctrl fsd_pin_ctrl[] __initconst = {
+static const struct samsung_pin_ctrl fsd_pin_ctrl[] __initconst = {
{
/* pin-controller instance 0 FSYS0 data */
.pin_banks = fsd_pin_banks0,
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 9ed764731570..f7c9459f6628 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -225,6 +225,13 @@ static void stm32_gpio_free(struct gpio_chip *chip, unsigned offset)
pinctrl_gpio_free(chip->base + offset);
}
+static int stm32_gpio_get_noclk(struct gpio_chip *chip, unsigned int offset)
+{
+ struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
+
+ return !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
+}
+
static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
@@ -232,7 +239,7 @@ static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
clk_enable(bank->clk);
- ret = !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
+ ret = stm32_gpio_get_noclk(chip, offset);
clk_disable(bank->clk);
@@ -311,8 +318,12 @@ static void stm32_gpio_irq_trigger(struct irq_data *d)
struct stm32_gpio_bank *bank = d->domain->host_data;
int level;
+ /* Do not access the GPIO if this is not a level-triggered IRQ. */
+ if (!(bank->irq_type[d->hwirq] & IRQ_TYPE_LEVEL_MASK))
+ return;
+
/* If level interrupt type then retrig */
- level = stm32_gpio_get(&bank->gpio_chip, d->hwirq);
+ level = stm32_gpio_get_noclk(&bank->gpio_chip, d->hwirq);
if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) ||
(level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH))
irq_chip_retrigger_hierarchy(d);
@@ -354,6 +365,7 @@ static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
{
struct stm32_gpio_bank *bank = irq_data->domain->host_data;
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
+ unsigned long flags;
int ret;
ret = stm32_gpio_direction_input(&bank->gpio_chip, irq_data->hwirq);
@@ -367,6 +379,10 @@ static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
return ret;
}
+ flags = irqd_get_trigger_type(irq_data);
+ if (flags & IRQ_TYPE_LEVEL_MASK)
+ clk_enable(bank->clk);
+
return 0;
}
@@ -374,6 +390,9 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data)
{
struct stm32_gpio_bank *bank = irq_data->domain->host_data;
+ if (bank->irq_type[irq_data->hwirq] & IRQ_TYPE_LEVEL_MASK)
+ clk_disable(bank->clk);
+
gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
}
diff --git a/drivers/pinctrl/sunplus/sppctl_sp7021.c b/drivers/pinctrl/sunplus/sppctl_sp7021.c
index 9748345b9298..cd657760a644 100644
--- a/drivers/pinctrl/sunplus/sppctl_sp7021.c
+++ b/drivers/pinctrl/sunplus/sppctl_sp7021.c
@@ -419,7 +419,15 @@ static const struct sppctl_grp sp7021grps_prbp[] = {
EGRP("PROBE_PORT2", 2, pins_prp2),
};
+/*
+ * For compatibility reasons, the first valid item must start at the
+ * third position of the array. Keep the first two items of the table
+ * unused (dummy).
+ */
const struct sppctl_func sppctl_list_funcs[] = {
+ FNCN("", pinmux_type_fpmx, 0x00, 0, 0),
+ FNCN("", pinmux_type_fpmx, 0x00, 0, 0),
+
FNCN("L2SW_CLK_OUT", pinmux_type_fpmx, 0x00, 0, 7),
FNCN("L2SW_MAC_SMI_MDC", pinmux_type_fpmx, 0x00, 8, 7),
FNCN("L2SW_LED_FLASH0", pinmux_type_fpmx, 0x01, 0, 7),
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 6b8b3ab8db48..3463629f8764 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -584,21 +584,6 @@ static struct platform_driver acerhdf_driver = {
.remove = acerhdf_remove,
};
-/* checks if str begins with start */
-static int str_starts_with(const char *str, const char *start)
-{
- unsigned long str_len = 0, start_len = 0;
-
- str_len = strlen(str);
- start_len = strlen(start);
-
- if (str_len >= start_len &&
- !strncmp(str, start, start_len))
- return 1;
-
- return 0;
-}
-
/* check hardware */
static int __init acerhdf_check_hardware(void)
{
@@ -651,9 +636,9 @@ static int __init acerhdf_check_hardware(void)
* check if actual hardware BIOS vendor, product and version
* IDs start with the strings of BIOS table entry
*/
- if (str_starts_with(vendor, bt->vendor) &&
- str_starts_with(product, bt->product) &&
- str_starts_with(version, bt->version)) {
+ if (strstarts(vendor, bt->vendor) &&
+ strstarts(product, bt->product) &&
+ strstarts(version, bt->version)) {
found = 1;
break;
}
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index e9d0dbbb2887..fa4123dbdf7f 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -160,8 +160,10 @@ MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
static struct amd_pmc_dev pmc;
static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
-static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
+#ifdef CONFIG_SUSPEND
+static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
+#endif
static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
{
@@ -325,6 +327,7 @@ static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table
return 0;
}
+#ifdef CONFIG_SUSPEND
static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
{
struct smu_metrics table;
@@ -338,6 +341,7 @@ static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
dev_dbg(pdev->dev, "Last suspend in deepest state for %lluus\n",
table.timein_s0i3_lastcapture);
}
+#endif
#ifdef CONFIG_DEBUG_FS
static int smu_fw_info_show(struct seq_file *s, void *unused)
@@ -569,6 +573,7 @@ out_unlock:
return rc;
}
+#ifdef CONFIG_SUSPEND
static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
{
switch (dev->cpu_id) {
@@ -694,6 +699,7 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
.prepare = amd_pmc_s2idle_prepare,
.restore = amd_pmc_s2idle_restore,
};
+#endif
static const struct pci_device_id pmc_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
@@ -733,6 +739,7 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
return 0;
}
+#ifdef CONFIG_SUSPEND
static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
{
int err;
@@ -753,6 +760,7 @@ static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
return 0;
}
+#endif
static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
{
@@ -859,9 +867,11 @@ static int amd_pmc_probe(struct platform_device *pdev)
amd_pmc_get_smu_version(dev);
platform_set_drvdata(pdev, dev);
+#ifdef CONFIG_SUSPEND
err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
if (err)
dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
+#endif
amd_pmc_dbgfs_register(dev);
return 0;
@@ -875,7 +885,9 @@ static int amd_pmc_remove(struct platform_device *pdev)
{
struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
+#ifdef CONFIG_SUSPEND
acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
+#endif
amd_pmc_dbgfs_unregister(dev);
pci_dev_put(dev->rdev);
mutex_destroy(&dev->lock);
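
The amd-pmc hunks wrap suspend-only code in #ifdef CONFIG_SUSPEND to avoid "defined but not used" warnings. An alternative sometimes used for this (not what the patch does) is __maybe_unused, sketched here with a hypothetical function:

	/* Sketch: __maybe_unused silences the unused warning without #ifdef;
	 * the body is still compiled (and type-checked) in every config.
	 */
	static int __maybe_unused example_write_stb(struct example_dev *dev, u32 data)
	{
		return example_send_cmd(dev, data);	/* hypothetical helper */
	}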
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 2104a2621e50..0e7fbed8a50d 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -371,10 +371,14 @@ static int asus_wmi_evaluate_method_buf(u32 method_id,
switch (obj->type) {
case ACPI_TYPE_BUFFER:
- if (obj->buffer.length > size)
+ if (obj->buffer.length > size) {
err = -ENOSPC;
- if (obj->buffer.length == 0)
+ break;
+ }
+ if (obj->buffer.length == 0) {
err = -ENODATA;
+ break;
+ }
memcpy(ret_buffer, obj->buffer.pointer, obj->buffer.length);
break;
@@ -2223,9 +2227,10 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
err = fan_curve_get_factory_default(asus, fan_dev);
if (err) {
- if (err == -ENODEV || err == -ENODATA)
- return 0;
- return err;
+ pr_debug("fan_curve_get_factory_default(0x%08x) failed: %d\n",
+ fan_dev, err);
+ /* Don't cause probe to fail on devices without fan-curves */
+ return 0;
}
*available = true;
diff --git a/drivers/platform/x86/barco-p50-gpio.c b/drivers/platform/x86/barco-p50-gpio.c
index f5c72e33f9ae..05534287bc26 100644
--- a/drivers/platform/x86/barco-p50-gpio.c
+++ b/drivers/platform/x86/barco-p50-gpio.c
@@ -10,7 +10,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/err.h>
diff --git a/drivers/platform/x86/dell/dell-laptop.c b/drivers/platform/x86/dell/dell-laptop.c
index 8230e7a68a5e..1321687d923e 100644
--- a/drivers/platform/x86/dell/dell-laptop.c
+++ b/drivers/platform/x86/dell/dell-laptop.c
@@ -80,6 +80,10 @@ static struct quirk_entry quirk_dell_inspiron_1012 = {
.kbd_led_not_present = true,
};
+static struct quirk_entry quirk_dell_latitude_7520 = {
+ .kbd_missing_ac_tag = true,
+};
+
static struct platform_driver platform_driver = {
.driver = {
.name = "dell-laptop",
@@ -336,6 +340,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
},
.driver_data = &quirk_dell_inspiron_1012,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Latitude 7520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 7520"),
+ },
+ .driver_data = &quirk_dell_latitude_7520,
+ },
{ }
};
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index 658bab4b7964..e87a931eab1e 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -148,6 +148,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660 GAMING X DDR4"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
index a46d3b53bf61..7a059e02c265 100644
--- a/drivers/platform/x86/intel/pmc/core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -236,7 +236,7 @@ enum ppfear_regs {
#define ADL_LPM_STATUS_LATCH_EN_OFFSET 0x1704
#define ADL_LPM_LIVE_STATUS_OFFSET 0x1764
-const char *pmc_lpm_modes[] = {
+static const char *pmc_lpm_modes[] = {
"S0i2.0",
"S0i2.1",
"S0i2.2",
diff --git a/drivers/platform/x86/intel/sdsi.c b/drivers/platform/x86/intel/sdsi.c
index 11d14cc0ff0a..c830e98dfa38 100644
--- a/drivers/platform/x86/intel/sdsi.c
+++ b/drivers/platform/x86/intel/sdsi.c
@@ -51,6 +51,8 @@
#define MBOX_TIMEOUT_US 2000
#define MBOX_TIMEOUT_ACQUIRE_US 1000
#define MBOX_POLLING_PERIOD_US 100
+#define MBOX_ACQUIRE_NUM_RETRIES 5
+#define MBOX_ACQUIRE_RETRY_DELAY_MS 500
#define MBOX_MAX_PACKETS 4
#define MBOX_OWNER_NONE 0x00
@@ -81,7 +83,7 @@ enum sdsi_command {
struct sdsi_mbox_info {
u64 *payload;
- u64 *buffer;
+ void *buffer;
int size;
};
@@ -163,9 +165,7 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf
total = 0;
loop = 0;
do {
- int offset = SDSI_SIZE_MAILBOX * loop;
- void __iomem *addr = priv->mbox_addr + offset;
- u64 *buf = info->buffer + offset / SDSI_SIZE_CMD;
+ void *buf = info->buffer + (SDSI_SIZE_MAILBOX * loop);
u32 packet_size;
/* Poll on ready bit */
@@ -196,7 +196,7 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf
break;
}
- sdsi_memcpy64_fromio(buf, addr, round_up(packet_size, SDSI_SIZE_CMD));
+ sdsi_memcpy64_fromio(buf, priv->mbox_addr, round_up(packet_size, SDSI_SIZE_CMD));
total += packet_size;
@@ -243,8 +243,8 @@ static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *in
FIELD_PREP(CTRL_PACKET_SIZE, info->size);
writeq(control, priv->control_addr);
- /* Poll on run_busy bit */
- ret = readq_poll_timeout(priv->control_addr, control, !(control & CTRL_RUN_BUSY),
+ /* Poll on ready bit */
+ ret = readq_poll_timeout(priv->control_addr, control, control & CTRL_READY,
MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_US);
if (ret)
@@ -263,7 +263,7 @@ static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info
{
u64 control;
u32 owner;
- int ret;
+ int ret, retries = 0;
lockdep_assert_held(&priv->mb_lock);
@@ -273,13 +273,29 @@ static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info
if (owner != MBOX_OWNER_NONE)
return -EBUSY;
- /* Write first qword of payload */
- writeq(info->payload[0], priv->mbox_addr);
+ /*
+ * If there has been no recent transaction and no one owns the mailbox,
+ * we should acquire it in under 1ms. However, if we've accessed it
+ * recently it may take up to 2.1 seconds to acquire it again.
+ */
+ do {
+ /* Write first qword of payload */
+ writeq(info->payload[0], priv->mbox_addr);
+
+ /* Check for ownership */
+ ret = readq_poll_timeout(priv->control_addr, control,
+ FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_INBAND,
+ MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_ACQUIRE_US);
+
+ if (FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_NONE &&
+ retries++ < MBOX_ACQUIRE_NUM_RETRIES) {
+ msleep(MBOX_ACQUIRE_RETRY_DELAY_MS);
+ continue;
+ }
- /* Check for ownership */
- ret = readq_poll_timeout(priv->control_addr, control,
- FIELD_GET(CTRL_OWNER, control) & MBOX_OWNER_INBAND,
- MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_ACQUIRE_US);
+ /* Either we got it or someone else did. */
+ break;
+ } while (true);
return ret;
}
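
The acquire protocol the new comment describes — write a claim, poll briefly for ownership, then back off and retry while the mailbox stays unowned — is easy to get wrong, so here is a minimal user-space sketch of the same scheme. write_claim() and read_owner() are hypothetical stand-ins for the driver's MMIO accesses; the constants mirror the patch.

#include <stdbool.h>
#include <unistd.h>

#define ACQUIRE_POLLS           10      /* ~1 ms at 100 us per poll */
#define ACQUIRE_NUM_RETRIES      5
#define ACQUIRE_RETRY_DELAY_US  500000  /* 500 ms, as in the patch */

enum owner { OWNER_NONE, OWNER_INBAND, OWNER_OTHER };

/* Hypothetical stand-ins for the first-qword write and control read. */
static enum owner mbox_owner = OWNER_NONE;
static void write_claim(void) { mbox_owner = OWNER_INBAND; }
static enum owner read_owner(void) { return mbox_owner; }

static int mbox_acquire(void)
{
	int retries = 0;

	do {
		write_claim();

		/* Poll briefly for ownership to latch. */
		for (int i = 0; i < ACQUIRE_POLLS; i++) {
			if (read_owner() == OWNER_INBAND)
				return 0;               /* acquired */
			usleep(100);
		}

		/* Still unowned: hardware may be cooling down; retry. */
		if (read_owner() == OWNER_NONE &&
		    retries++ < ACQUIRE_NUM_RETRIES) {
			usleep(ACQUIRE_RETRY_DELAY_US);
			continue;
		}

		/* Someone else owns it, or retries are exhausted. */
		return -1;
	} while (true);
}
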
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
index c61f804dd44e..8f9c571d7257 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
@@ -212,6 +212,9 @@ static int __init intel_uncore_init(void)
const struct x86_cpu_id *id;
int ret;
+ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+ return -ENODEV;
+
id = x86_match_cpu(intel_uncore_cpu_ids);
if (!id)
return -ENODEV;
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index c1d9ed9b7b67..19f6b456234f 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev,
if (value > samsung->kbd_led.max_brightness)
value = samsung->kbd_led.max_brightness;
- else if (value < 0)
- value = 0;
samsung->kbd_led_wk = value;
queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index bce17ca97947..a01a92769c1a 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -740,16 +740,8 @@ static ssize_t certificate_store(struct kobject *kobj,
if (!tlmi_priv.certificate_support)
return -EOPNOTSUPP;
- new_cert = kstrdup(buf, GFP_KERNEL);
- if (!new_cert)
- return -ENOMEM;
- /* Strip out CR if one is present */
- strip_cr(new_cert);
-
/* If empty then clear installed certificate */
- if (new_cert[0] == '\0') { /* Clear installed certificate */
- kfree(new_cert);
-
+ if ((buf[0] == '\0') || (buf[0] == '\n')) { /* Clear installed certificate */
/* Check that signature is set */
if (!setting->signature || !setting->signature[0])
return -EACCES;
@@ -763,14 +755,16 @@ static ssize_t certificate_store(struct kobject *kobj,
ret = tlmi_simple_call(LENOVO_CLEAR_BIOS_CERT_GUID, auth_str);
kfree(auth_str);
- if (ret)
- return ret;
- kfree(setting->certificate);
- setting->certificate = NULL;
- return count;
+ return ret ?: count;
}
+ new_cert = kstrdup(buf, GFP_KERNEL);
+ if (!new_cert)
+ return -ENOMEM;
+ /* Strip out CR if one is present */
+ strip_cr(new_cert);
+
if (setting->cert_installed) {
/* Certificate is installed so this is an update */
if (!setting->signature || !setting->signature[0]) {
@@ -792,21 +786,14 @@ static ssize_t certificate_store(struct kobject *kobj,
auth_str = kasprintf(GFP_KERNEL, "%s,%s",
new_cert, setting->password);
}
- if (!auth_str) {
- kfree(new_cert);
+ kfree(new_cert);
+ if (!auth_str)
return -ENOMEM;
- }
ret = tlmi_simple_call(guid, auth_str);
kfree(auth_str);
- if (ret) {
- kfree(new_cert);
- return ret;
- }
- kfree(setting->certificate);
- setting->certificate = new_cert;
- return count;
+ return ret ?: count;
}
static struct kobj_attribute auth_certificate = __ATTR_WO(certificate);
@@ -1194,6 +1181,10 @@ static void tlmi_release_attr(void)
kset_unregister(tlmi_priv.attribute_kset);
+ /* Free up any saved signatures */
+ kfree(tlmi_priv.pwd_admin->signature);
+ kfree(tlmi_priv.pwd_admin->save_signature);
+
/* Authentication structures */
sysfs_remove_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group);
kobject_put(&tlmi_priv.pwd_admin->kobj);
@@ -1210,11 +1201,6 @@ static void tlmi_release_attr(void)
}
kset_unregister(tlmi_priv.authentication_kset);
-
- /* Free up any saved certificates/signatures */
- kfree(tlmi_priv.pwd_admin->certificate);
- kfree(tlmi_priv.pwd_admin->signature);
- kfree(tlmi_priv.pwd_admin->save_signature);
}
static int tlmi_sysfs_init(void)
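
The certificate_store() rework above settles into a common sysfs-store shape: handle the empty ("clear") input before allocating, free the temporary copy on every exit path, and finish with the kernel's ret ?: count idiom (the error if there is one, otherwise the bytes consumed). A compressed sketch of that shape, with a hypothetical firmware_call() in place of the WMI helper; the ?: form is the GNU extension the kernel uses:

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

/* Hypothetical stand-in for the firmware/WMI call. */
static int firmware_call(const char *arg) { (void)arg; return 0; }

static ssize_t example_store(const char *buf, size_t count)
{
	char *copy;
	int ret;

	/* Handle the "clear" request before allocating anything. */
	if (buf[0] == '\0' || buf[0] == '\n')
		return firmware_call(NULL) ?: (ssize_t)count;

	copy = strdup(buf);
	if (!copy)
		return -ENOMEM;

	ret = firmware_call(copy);
	free(copy);                /* freed on success and failure alike */
	return ret ?: (ssize_t)count;
}
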
diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
index 4f69df6eed07..4daba6151cd6 100644
--- a/drivers/platform/x86/think-lmi.h
+++ b/drivers/platform/x86/think-lmi.h
@@ -63,7 +63,6 @@ struct tlmi_pwd_setting {
int index; /*Used for HDD and NVME auth */
enum level_option level;
bool cert_installed;
- char *certificate;
char *signature;
char *save_signature;
};
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index ea02c8dcd748..d925cb137e12 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -604,6 +604,12 @@ int power_supply_get_battery_info(struct power_supply *psy,
err = samsung_sdi_battery_get_info(&psy->dev, value, &info);
if (!err)
goto out_ret_pointer;
+ else if (err == -ENODEV)
+ /*
+ * Device does not have a static battery.
+ * Proceed to look for a simple battery.
+ */
+ err = 0;
if (strcmp("simple-battery", value)) {
err = -ENODEV;
diff --git a/drivers/power/supply/samsung-sdi-battery.c b/drivers/power/supply/samsung-sdi-battery.c
index 9d59f277f519..b33daab798b9 100644
--- a/drivers/power/supply/samsung-sdi-battery.c
+++ b/drivers/power/supply/samsung-sdi-battery.c
@@ -824,6 +824,7 @@ static struct samsung_sdi_battery samsung_sdi_batteries[] = {
.constant_charge_current_max_ua = 900000,
.constant_charge_voltage_max_uv = 4200000,
.charge_term_current_ua = 200000,
+ .charge_restart_voltage_uv = 4170000,
.maintenance_charge = samsung_maint_charge_table,
.maintenance_charge_size = ARRAY_SIZE(samsung_maint_charge_table),
.alert_low_temp_charge_current_ua = 300000,
@@ -867,6 +868,7 @@ static struct samsung_sdi_battery samsung_sdi_batteries[] = {
.constant_charge_current_max_ua = 1500000,
.constant_charge_voltage_max_uv = 4350000,
.charge_term_current_ua = 120000,
+ .charge_restart_voltage_uv = 4300000,
.maintenance_charge = samsung_maint_charge_table,
.maintenance_charge_size = ARRAY_SIZE(samsung_maint_charge_table),
.alert_low_temp_charge_current_ua = 300000,
diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
index 1e8315038850..a8dde4606360 100644
--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
+++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
@@ -121,7 +121,9 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->rstc),
"failed to get reset\n");
- reset_control_deassert(priv->rstc);
+ error = reset_control_deassert(priv->rstc);
+ if (error)
+ return error;
priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops;
priv->rcdev.of_reset_n_cells = 1;
diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
index 24d3395964cc..4c5bba52b105 100644
--- a/drivers/reset/tegra/reset-bpmp.c
+++ b/drivers/reset/tegra/reset-bpmp.c
@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
struct mrq_reset_request request;
struct tegra_bpmp_message msg;
+ int err;
memset(&request, 0, sizeof(request));
request.cmd = command;
@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
msg.tx.data = &request;
msg.tx.size = sizeof(request);
- return tegra_bpmp_transfer(bpmp, &msg);
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
}
static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
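
The tegra-bpmp fix illustrates a general rule for firmware mailboxes: a request can fail at two layers — the transport call itself, and the firmware's status word inside the reply — and success means both checks pass. A minimal sketch, with a hypothetical transfer() standing in for tegra_bpmp_transfer():

#include <errno.h>

struct reply { int ret; };      /* firmware's status in the response */

/* Hypothetical transport: fills in rx, returns transport status. */
static int transfer(struct reply *rx) { rx->ret = 0; return 0; }

static int do_reset_request(void)
{
	struct reply rx = { 0 };
	int err;

	err = transfer(&rx);
	if (err)
		return err;       /* transport-level failure */
	if (rx.ret)
		return -EINVAL;   /* transport OK, firmware said no */

	return 0;
}
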
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 7fe7f53a41c0..6c864b093ac9 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1977,7 +1977,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
break;
- if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
+ if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
if (nopin->op_code == ISCSI_OP_NOOP_IN &&
nopin->itt == (u16) RESERVED_ITT) {
printk(KERN_ALERT "bnx2i: Unsolicited "
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index fe86fd61a995..15fbd09baa94 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1721,7 +1721,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
/* Must suspend all rx queue activity for this ep */
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
}
/* CONN_DISCONNECT timeout may or may not be an issue depending
* on what transpired in TCP layer, different targets behave
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 8c7d4dda4cf2..4365d52c6430 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1634,11 +1634,11 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
log_debug(1 << CXGBI_DBG_PDU_RX,
"csk 0x%p, conn 0x%p.\n", csk, conn);
- if (unlikely(!conn || conn->suspend_rx)) {
+ if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
log_debug(1 << CXGBI_DBG_PDU_RX,
- "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
+ "csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n",
csk, conn, conn ? conn->id : 0xFF,
- conn ? conn->suspend_rx : 0xFF);
+ conn ? conn->flags : 0xFF);
return;
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index cf4211c6500d..797abf4f5399 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -678,7 +678,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
struct iscsi_task *task;
itt_t itt;
- if (session->state == ISCSI_STATE_TERMINATE)
+ if (session->state == ISCSI_STATE_TERMINATE ||
+ !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags))
return NULL;
if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
@@ -1392,8 +1393,8 @@ static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
if (conn->stop_stage == 0)
session->state = ISCSI_STATE_FAILED;
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
return true;
}
@@ -1454,7 +1455,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
* Do this after dropping the extra ref because if this was a requeue
* it's removed from that list and cleanup_queued_task would miss it.
*/
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
/*
* Save the task and ref in case we weren't cleaning up this
* task and get woken up again.
@@ -1532,7 +1533,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
int rc = 0;
spin_lock_bh(&conn->session->frwd_lock);
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
@@ -1746,7 +1747,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
goto fault;
}
- if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_REQUEUE << 16;
goto fault;
@@ -1935,7 +1936,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
void iscsi_suspend_queue(struct iscsi_conn *conn)
{
spin_lock_bh(&conn->session->frwd_lock);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
spin_unlock_bh(&conn->session->frwd_lock);
}
EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
@@ -1953,7 +1954,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
struct Scsi_Host *shost = conn->session->host;
struct iscsi_host *ihost = shost_priv(shost);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
if (ihost->workq)
flush_workqueue(ihost->workq);
}
@@ -1961,7 +1962,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
static void iscsi_start_tx(struct iscsi_conn *conn)
{
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
iscsi_conn_queue_work(conn);
}
@@ -2214,6 +2215,8 @@ void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
iscsi_suspend_tx(conn);
spin_lock_bh(&session->frwd_lock);
+ clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
+
if (!is_active) {
/*
* if logout timed out before userspace could even send a PDU
@@ -3317,6 +3320,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
spin_lock_bh(&session->frwd_lock);
if (is_leading)
session->leadconn = conn;
+
+ set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
spin_unlock_bh(&session->frwd_lock);
/*
@@ -3329,8 +3334,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
/*
* Unblock xmitworker(), Login Phase will pass through.
*/
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+ clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_conn_bind);
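
The libiscsi conversion folds the old per-purpose suspend_tx/suspend_rx words (plus the new BOUND state) into one flags word driven by the atomic bitops, which shrinks the struct and leaves room for more states. A kernel-style sketch of the pattern — bit names here are illustrative; the real ISCSI_CONN_FLAG_* enum lives in the iscsi headers:

#include <linux/bitops.h>
#include <linux/types.h>

enum conn_flag {
	CONN_FLAG_SUSPEND_TX,
	CONN_FLAG_SUSPEND_RX,
	CONN_FLAG_BOUND,
};

struct conn {
	unsigned long flags;    /* one word holds all of the bits above */
};

static void conn_suspend_tx(struct conn *c)
{
	set_bit(CONN_FLAG_SUSPEND_TX, &c->flags);    /* atomic set */
}

static bool conn_tx_suspended(struct conn *c)
{
	return test_bit(CONN_FLAG_SUSPEND_TX, &c->flags);
}

static void conn_resume_tx(struct conn *c)
{
	clear_bit(CONN_FLAG_SUSPEND_TX, &c->flags);  /* atomic clear */
}
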
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 2e9ffe3d1a55..883005757ddb 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -927,7 +927,7 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
*/
conn->last_recv = jiffies;
- if (unlikely(conn->suspend_rx)) {
+ if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
ISCSI_DBG_TCP(conn, "Rx suspended!\n");
*status = ISCSI_TCP_SUSPENDED;
return 0;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index f90b707c190b..01c5e8ff4cc5 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -766,6 +766,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01;
+ /* Enable higher IQs and OQs, 32 to 63, bit 16 */
+ if (pm8001_ha->max_q_num > 32)
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+ 1 << 16;
/* Disable end to end CRC checking */
pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
@@ -1027,6 +1031,13 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
if (0x0000 != gst_len_mpistate)
return -EBUSY;
+ /*
+ * As per the controller datasheet, a minimum 500ms delay is
+ * required after successful MPI initialization before issuing
+ * commands.
+ */
+ msleep(500);
+
return 0;
}
@@ -1727,10 +1738,11 @@ static void
pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
- u32 mask;
- mask = (u32)(1 << vec);
-
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+ if (vec < 32)
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec);
+ else
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U,
+ 1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_enable(pm8001_ha);
@@ -1746,12 +1758,15 @@ static void
pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
#ifdef PM8001_USE_MSIX
- u32 mask;
- if (vec == 0xFF)
- mask = 0xFFFFFFFF;
+ if (vec == 0xFF) {
+ /* disable all vectors 0-31, 32-63 */
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF);
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF);
+ } else if (vec < 32)
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
else
- mask = (u32)(1 << vec);
- pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+ pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U,
+ 1U << (vec - 32));
return;
#endif
pm80xx_chip_intx_interrupt_disable(pm8001_ha);
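
The pm80xx interrupt hunks fix a 64-vector/32-bit-register mismatch: vectors 32-63 must land in the upper ("_U") register with the bit index rebased by 32, and the shift must be unsigned (1U << vec) so it can never overflow into the sign bit. A sketch of the split, with hypothetical register names and MMIO helper:

#include <stdint.h>

#define REG_ODMR    0x00   /* hypothetical: mask register, vectors 0-31 */
#define REG_ODMR_U  0x04   /* hypothetical: mask register, vectors 32-63 */

/* Hypothetical stand-in for the 32-bit MMIO write helper. */
static void reg_write(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

static void mask_vector(uint8_t vec)
{
	if (vec == 0xFF) {
		/* Special value: mask everything, 0-31 and 32-63. */
		reg_write(REG_ODMR, 0xFFFFFFFF);
		reg_write(REG_ODMR_U, 0xFFFFFFFF);
	} else if (vec < 32) {
		reg_write(REG_ODMR, 1U << vec);
	} else {
		reg_write(REG_ODMR_U, 1U << (vec - 32));
	}
}
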
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 8196f89f404e..31ec429104e2 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -860,6 +860,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
return qedi_iscsi_send_ioreq(task);
}
+static void qedi_offload_work(struct work_struct *work)
+{
+ struct qedi_endpoint *qedi_ep =
+ container_of(work, struct qedi_endpoint, offload_work);
+ struct qedi_ctx *qedi;
+ int wait_delay = 5 * HZ;
+ int ret;
+
+ qedi = qedi_ep->qedi;
+
+ ret = qedi_iscsi_offload_conn(qedi_ep);
+ if (ret) {
+ QEDI_ERR(&qedi->dbg_ctx,
+ "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+ qedi_ep->iscsi_cid, qedi_ep, ret);
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ return;
+ }
+
+ ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+ (qedi_ep->state ==
+ EP_STATE_OFLDCONN_COMPL),
+ wait_delay);
+ if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
+ qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+ qedi_ep->iscsi_cid, qedi_ep);
+ }
+}
+
static struct iscsi_endpoint *
qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
@@ -908,6 +939,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
}
qedi_ep = ep->dd_data;
memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+ INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
qedi_ep->state = EP_STATE_IDLE;
qedi_ep->iscsi_cid = (u32)-1;
qedi_ep->qedi = qedi;
@@ -1056,12 +1088,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
qedi_ep = ep->dd_data;
qedi = qedi_ep->qedi;
+ flush_work(&qedi_ep->offload_work);
+
if (qedi_ep->state == EP_STATE_OFLDCONN_START)
goto ep_exit_recover;
- if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
- flush_work(&qedi_ep->offload_work);
-
if (qedi_ep->conn) {
qedi_conn = qedi_ep->conn;
abrt_conn = qedi_conn->abrt_conn;
@@ -1235,37 +1266,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
return rc;
}
-static void qedi_offload_work(struct work_struct *work)
-{
- struct qedi_endpoint *qedi_ep =
- container_of(work, struct qedi_endpoint, offload_work);
- struct qedi_ctx *qedi;
- int wait_delay = 5 * HZ;
- int ret;
-
- qedi = qedi_ep->qedi;
-
- ret = qedi_iscsi_offload_conn(qedi_ep);
- if (ret) {
- QEDI_ERR(&qedi->dbg_ctx,
- "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
- qedi_ep->iscsi_cid, qedi_ep, ret);
- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
- return;
- }
-
- ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
- (qedi_ep->state ==
- EP_STATE_OFLDCONN_COMPL),
- wait_delay);
- if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
- qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
- QEDI_ERR(&qedi->dbg_ctx,
- "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
- qedi_ep->iscsi_cid, qedi_ep);
- }
-}
-
static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
{
struct qedi_ctx *qedi;
@@ -1381,7 +1381,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
qedi_ep->dst_addr, qedi_ep->dst_port);
}
- INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
queue_work(qedi->offload_thread, &qedi_ep->offload_work);
ret = 0;
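
The qedi reshuffle is a lifetime fix rather than a behavior change: INIT_WORK() moves to endpoint allocation and flush_work() becomes unconditional in the disconnect path, so teardown never races a work item whose initialization the old code had to guess at from the endpoint state. The pattern, sketched with simplified types:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct endpoint {
	struct work_struct offload_work;
	/* ... */
};

static void offload_fn(struct work_struct *work)
{
	/* offload the connection, wait for completion, ... */
}

static struct endpoint *ep_alloc(void)
{
	struct endpoint *ep = kzalloc(sizeof(*ep), GFP_KERNEL);

	if (!ep)
		return NULL;
	/* Initialize at creation time, not at first queue_work(). */
	INIT_WORK(&ep->offload_work, offload_fn);
	return ep;
}

static void ep_disconnect(struct endpoint *ep)
{
	/* Safe even if the work was never queued: it is initialized. */
	flush_work(&ep->offload_work);
	kfree(ep);
}
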
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index ff78ef702f22..592a290e6cfa 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -32,7 +32,6 @@
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
@@ -732,9 +731,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static atomic_t sdebug_num_hosts;
-static DEFINE_MUTEX(add_host_mutex);
-
+static int sdebug_num_hosts;
static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
static int sdebug_ato = DEF_ATO;
static int sdebug_cdb_len = DEF_CDB_LEN;
@@ -781,7 +778,6 @@ static int sdebug_uuid_ctl = DEF_UUID_CTL;
static bool sdebug_random = DEF_RANDOM;
static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
static bool sdebug_removable = DEF_REMOVABLE;
-static bool sdebug_deflect_incoming;
static bool sdebug_clustering;
static bool sdebug_host_lock = DEF_HOST_LOCK;
static bool sdebug_strict = DEF_STRICT;
@@ -5122,10 +5118,6 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
- if (smp_load_acquire(&sdebug_deflect_incoming)) {
- pr_info("Exit early due to deflect_incoming\n");
- return 1;
- }
if (devip == NULL) {
devip = find_build_dev_info(sdp);
if (devip == NULL)
@@ -5211,7 +5203,7 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
}
/* Deletes (stops) timers or work queues of all queued commands */
-static void stop_all_queued(bool done_with_no_conn)
+static void stop_all_queued(void)
{
unsigned long iflags;
int j, k;
@@ -5220,15 +5212,13 @@ static void stop_all_queued(bool done_with_no_conn)
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
struct sdebug_defer *sd_dp;
- struct scsi_cmnd *scp;
for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
spin_lock_irqsave(&sqp->qc_lock, iflags);
for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
if (test_bit(k, sqp->in_use_bm)) {
sqcp = &sqp->qc_arr[k];
- scp = sqcp->a_cmnd;
- if (!scp)
+ if (sqcp->a_cmnd == NULL)
continue;
devip = (struct sdebug_dev_info *)
sqcp->a_cmnd->device->hostdata;
@@ -5243,10 +5233,6 @@ static void stop_all_queued(bool done_with_no_conn)
l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
stop_qc_helper(sd_dp, l_defer_t);
- if (done_with_no_conn && l_defer_t != SDEB_DEFER_NONE) {
- scp->result = DID_NO_CONNECT << 16;
- scsi_done(scp);
- }
clear_bit(k, sqp->in_use_bm);
spin_lock_irqsave(&sqp->qc_lock, iflags);
}
@@ -5389,7 +5375,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
}
}
spin_unlock(&sdebug_host_list_lock);
- stop_all_queued(false);
+ stop_all_queued();
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
"%s: %d device(s) found\n", __func__, k);
@@ -5449,50 +5435,13 @@ static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
}
}
-static void sdeb_block_all_queues(void)
-{
- int j;
- struct sdebug_queue *sqp;
-
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
- atomic_set(&sqp->blocked, (int)true);
-}
-
-static void sdeb_unblock_all_queues(void)
+static void block_unblock_all_queues(bool block)
{
int j;
struct sdebug_queue *sqp;
for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
- atomic_set(&sqp->blocked, (int)false);
-}
-
-static void
-sdeb_add_n_hosts(int num_hosts)
-{
- if (num_hosts < 1)
- return;
- do {
- bool found;
- unsigned long idx;
- struct sdeb_store_info *sip;
- bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
-
- found = false;
- if (want_phs) {
- xa_for_each_marked(per_store_ap, idx, sip, SDEB_XA_NOT_IN_USE) {
- sdeb_most_recent_idx = (int)idx;
- found = true;
- break;
- }
- if (found) /* re-use case */
- sdebug_add_host_helper((int)idx);
- else
- sdebug_do_add_host(true /* make new store */);
- } else {
- sdebug_do_add_host(false);
- }
- } while (--num_hosts);
+ atomic_set(&sqp->blocked, (int)block);
}
/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
@@ -5505,10 +5454,10 @@ static void tweak_cmnd_count(void)
modulo = abs(sdebug_every_nth);
if (modulo < 2)
return;
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
count = atomic_read(&sdebug_cmnd_count);
atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
}
static void clear_queue_stats(void)
@@ -5526,15 +5475,6 @@ static bool inject_on_this_cmd(void)
return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
}
-static int process_deflect_incoming(struct scsi_cmnd *scp)
-{
- u8 opcode = scp->cmnd[0];
-
- if (opcode == SYNCHRONIZE_CACHE || opcode == SYNCHRONIZE_CACHE_16)
- return 0;
- return DID_NO_CONNECT << 16;
-}
-
#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
/* Complete the processing of the thread that queued a SCSI command to this
@@ -5544,7 +5484,8 @@ static int process_deflect_incoming(struct scsi_cmnd *scp)
*/
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
int scsi_result,
- int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *),
+ int (*pfp)(struct scsi_cmnd *,
+ struct sdebug_dev_info *),
int delta_jiff, int ndelay)
{
bool new_sd_dp;
@@ -5565,27 +5506,13 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
sdp = cmnd->device;
- if (delta_jiff == 0) {
- sqp = get_queue(cmnd);
- if (atomic_read(&sqp->blocked)) {
- if (smp_load_acquire(&sdebug_deflect_incoming))
- return process_deflect_incoming(cmnd);
- else
- return SCSI_MLQUEUE_HOST_BUSY;
- }
+ if (delta_jiff == 0)
goto respond_in_thread;
- }
sqp = get_queue(cmnd);
spin_lock_irqsave(&sqp->qc_lock, iflags);
if (unlikely(atomic_read(&sqp->blocked))) {
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- if (smp_load_acquire(&sdebug_deflect_incoming)) {
- scsi_result = process_deflect_incoming(cmnd);
- goto respond_in_thread;
- }
- if (sdebug_verbose)
- pr_info("blocked --> SCSI_MLQUEUE_HOST_BUSY\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
num_in_q = atomic_read(&devip->num_in_q);
@@ -5774,12 +5701,8 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
respond_in_thread: /* call back to mid-layer using invocation thread */
cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
cmnd->result &= ~SDEG_RES_IMMED_MASK;
- if (cmnd->result == 0 && scsi_result != 0) {
+ if (cmnd->result == 0 && scsi_result != 0)
cmnd->result = scsi_result;
- if (sdebug_verbose)
- pr_info("respond_in_thread: tag=0x%x, scp->result=0x%x\n",
- blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)), scsi_result);
- }
scsi_done(cmnd);
return 0;
}
@@ -6064,7 +5987,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
int j, k;
struct sdebug_queue *sqp;
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
++j, ++sqp) {
k = find_first_bit(sqp->in_use_bm,
@@ -6078,7 +6001,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
sdebug_jdelay = jdelay;
sdebug_ndelay = 0;
}
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
}
return res;
}
@@ -6104,7 +6027,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
int j, k;
struct sdebug_queue *sqp;
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
++j, ++sqp) {
k = find_first_bit(sqp->in_use_bm,
@@ -6119,7 +6042,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
: DEF_JDELAY;
}
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
}
return res;
}
@@ -6433,7 +6356,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
(n <= SDEBUG_CANQUEUE) &&
(sdebug_host_max_queue == 0)) {
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
k = 0;
for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
++j, ++sqp) {
@@ -6448,7 +6371,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
atomic_set(&retired_max_queue, k + 1);
else
atomic_set(&retired_max_queue, 0);
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
return count;
}
return -EINVAL;
@@ -6537,48 +6460,43 @@ static DRIVER_ATTR_RW(virtual_gb);
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
/* absolute number of hosts currently active is what is shown */
- return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&sdebug_num_hosts));
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
}
-/*
- * Accept positive and negative values. Hex values (only positive) may be prefixed by '0x'.
- * To remove all hosts use a large negative number (e.g. -9999). The value 0 does nothing.
- * Returns -EBUSY if another add_host sysfs invocation is active.
- */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
size_t count)
{
+ bool found;
+ unsigned long idx;
+ struct sdeb_store_info *sip;
+ bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
int delta_hosts;
- if (count == 0 || kstrtoint(buf, 0, &delta_hosts))
+ if (sscanf(buf, "%d", &delta_hosts) != 1)
return -EINVAL;
- if (sdebug_verbose)
- pr_info("prior num_hosts=%d, num_to_add=%d\n",
- atomic_read(&sdebug_num_hosts), delta_hosts);
- if (delta_hosts == 0)
- return count;
- if (mutex_trylock(&add_host_mutex) == 0)
- return -EBUSY;
if (delta_hosts > 0) {
- sdeb_add_n_hosts(delta_hosts);
- } else if (delta_hosts < 0) {
- smp_store_release(&sdebug_deflect_incoming, true);
- sdeb_block_all_queues();
- if (delta_hosts >= atomic_read(&sdebug_num_hosts))
- stop_all_queued(true);
do {
- if (atomic_read(&sdebug_num_hosts) < 1) {
- free_all_queued();
- break;
+ found = false;
+ if (want_phs) {
+ xa_for_each_marked(per_store_ap, idx, sip,
+ SDEB_XA_NOT_IN_USE) {
+ sdeb_most_recent_idx = (int)idx;
+ found = true;
+ break;
+ }
+ if (found) /* re-use case */
+ sdebug_add_host_helper((int)idx);
+ else
+ sdebug_do_add_host(true);
+ } else {
+ sdebug_do_add_host(false);
}
+ } while (--delta_hosts);
+ } else if (delta_hosts < 0) {
+ do {
sdebug_do_remove_host(false);
} while (++delta_hosts);
- sdeb_unblock_all_queues();
- smp_store_release(&sdebug_deflect_incoming, false);
}
- mutex_unlock(&add_host_mutex);
- if (sdebug_verbose)
- pr_info("post num_hosts=%d\n", atomic_read(&sdebug_num_hosts));
return count;
}
static DRIVER_ATTR_RW(add_host);
@@ -7089,10 +7007,6 @@ static int __init scsi_debug_init(void)
sdebug_add_host = 0;
for (k = 0; k < hosts_to_add; k++) {
- if (smp_load_acquire(&sdebug_deflect_incoming)) {
- pr_info("exit early as sdebug_deflect_incoming is set\n");
- return 0;
- }
if (want_store && k == 0) {
ret = sdebug_add_host_helper(idx);
if (ret < 0) {
@@ -7110,12 +7024,8 @@ static int __init scsi_debug_init(void)
}
}
if (sdebug_verbose)
- pr_info("built %d host(s)\n", atomic_read(&sdebug_num_hosts));
+ pr_info("built %d host(s)\n", sdebug_num_hosts);
- /*
- * Even though all the hosts have been established, due to async device (LU) scanning
- * by the scsi mid-level, there may still be devices (LUs) being set up.
- */
return 0;
bus_unreg:
@@ -7131,17 +7041,12 @@ free_q_arr:
static void __exit scsi_debug_exit(void)
{
- int k;
+ int k = sdebug_num_hosts;
- /* Possible race with LUs still being set up; stop them asap */
- sdeb_block_all_queues();
- smp_store_release(&sdebug_deflect_incoming, true);
- stop_all_queued(false);
- for (k = 0; atomic_read(&sdebug_num_hosts) > 0; k++)
+ stop_all_queued();
+ for (; k; k--)
sdebug_do_remove_host(true);
free_all_queued();
- if (sdebug_verbose)
- pr_info("removed %d hosts\n", k);
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
@@ -7311,13 +7216,13 @@ static int sdebug_add_host_helper(int per_host_idx)
sdbg_host->dev.bus = &pseudo_lld_bus;
sdbg_host->dev.parent = pseudo_primary;
sdbg_host->dev.release = &sdebug_release_adapter;
- dev_set_name(&sdbg_host->dev, "adapter%d", atomic_read(&sdebug_num_hosts));
+ dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
error = device_register(&sdbg_host->dev);
if (error)
goto clean;
- atomic_inc(&sdebug_num_hosts);
+ ++sdebug_num_hosts;
return 0;
clean:
@@ -7381,7 +7286,7 @@ static void sdebug_do_remove_host(bool the_end)
return;
device_unregister(&sdbg_host->dev);
- atomic_dec(&sdebug_num_hosts);
+ --sdebug_num_hosts;
}
static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
@@ -7389,10 +7294,10 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
int num_in_q = 0;
struct sdebug_dev_info *devip;
- sdeb_block_all_queues();
+ block_unblock_all_queues(true);
devip = (struct sdebug_dev_info *)sdev->hostdata;
if (NULL == devip) {
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
return -ENODEV;
}
num_in_q = atomic_read(&devip->num_in_q);
@@ -7411,7 +7316,7 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
__func__, qdepth, num_in_q);
}
- sdeb_unblock_all_queues();
+ block_unblock_all_queues(false);
return sdev->queue_depth;
}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 27951ea05dd4..2c0dd64159b0 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -86,6 +86,9 @@ struct iscsi_internal {
struct transport_container session_cont;
};
+static DEFINE_IDR(iscsi_ep_idr);
+static DEFINE_MUTEX(iscsi_ep_idr_mutex);
+
static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
static struct workqueue_struct *iscsi_conn_cleanup_workq;
@@ -168,6 +171,11 @@ struct device_attribute dev_attr_##_prefix##_##_name = \
static void iscsi_endpoint_release(struct device *dev)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+
+ mutex_lock(&iscsi_ep_idr_mutex);
+ idr_remove(&iscsi_ep_idr, ep->id);
+ mutex_unlock(&iscsi_ep_idr_mutex);
+
kfree(ep);
}
@@ -180,7 +188,7 @@ static ssize_t
show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id);
+ return sysfs_emit(buf, "%d\n", ep->id);
}
static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
@@ -193,48 +201,32 @@ static struct attribute_group iscsi_endpoint_group = {
.attrs = iscsi_endpoint_attrs,
};
-#define ISCSI_MAX_EPID -1
-
-static int iscsi_match_epid(struct device *dev, const void *data)
-{
- struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
- const uint64_t *epid = data;
-
- return *epid == ep->id;
-}
-
struct iscsi_endpoint *
iscsi_create_endpoint(int dd_size)
{
- struct device *dev;
struct iscsi_endpoint *ep;
- uint64_t id;
- int err;
-
- for (id = 1; id < ISCSI_MAX_EPID; id++) {
- dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
- iscsi_match_epid);
- if (!dev)
- break;
- else
- put_device(dev);
- }
- if (id == ISCSI_MAX_EPID) {
- printk(KERN_ERR "Too many connections. Max supported %u\n",
- ISCSI_MAX_EPID - 1);
- return NULL;
- }
+ int err, id;
ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
if (!ep)
return NULL;
+ mutex_lock(&iscsi_ep_idr_mutex);
+ id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);
+ if (id < 0) {
+ mutex_unlock(&iscsi_ep_idr_mutex);
+ printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
+ id);
+ goto free_ep;
+ }
+ mutex_unlock(&iscsi_ep_idr_mutex);
+
ep->id = id;
ep->dev.class = &iscsi_endpoint_class;
- dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
+ dev_set_name(&ep->dev, "ep-%d", id);
err = device_register(&ep->dev);
if (err)
- goto free_ep;
+ goto free_id;
err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
if (err)
@@ -248,6 +240,10 @@ unregister_dev:
device_unregister(&ep->dev);
return NULL;
+free_id:
+ mutex_lock(&iscsi_ep_idr_mutex);
+ idr_remove(&iscsi_ep_idr, id);
+ mutex_unlock(&iscsi_ep_idr_mutex);
free_ep:
kfree(ep);
return NULL;
@@ -275,14 +271,17 @@ EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
*/
struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
{
- struct device *dev;
+ struct iscsi_endpoint *ep;
- dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
- iscsi_match_epid);
- if (!dev)
- return NULL;
+ mutex_lock(&iscsi_ep_idr_mutex);
+ ep = idr_find(&iscsi_ep_idr, handle);
+ if (!ep)
+ goto unlock;
- return iscsi_dev_to_endpoint(dev);
+ get_device(&ep->dev);
+unlock:
+ mutex_unlock(&iscsi_ep_idr_mutex);
+ return ep;
}
EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
@@ -2202,10 +2201,10 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
switch (flag) {
case STOP_CONN_RECOVER:
- conn->state = ISCSI_CONN_FAILED;
+ WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
break;
case STOP_CONN_TERM:
- conn->state = ISCSI_CONN_DOWN;
+ WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
break;
default:
iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
@@ -2217,6 +2216,49 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
}
+static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
+{
+ struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+ struct iscsi_endpoint *ep;
+
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
+ WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
+
+ if (!conn->ep || !session->transport->ep_disconnect)
+ return;
+
+ ep = conn->ep;
+ conn->ep = NULL;
+
+ session->transport->unbind_conn(conn, is_active);
+ session->transport->ep_disconnect(ep);
+ ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
+}
+
+static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
+ struct iscsi_endpoint *ep,
+ bool is_active)
+{
+ /* Check if this was a conn error and the kernel took ownership */
+ spin_lock_irq(&conn->lock);
+ if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
+ iscsi_ep_disconnect(conn, is_active);
+ } else {
+ spin_unlock_irq(&conn->lock);
+ ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
+ mutex_unlock(&conn->ep_mutex);
+
+ flush_work(&conn->cleanup_work);
+ /*
+ * Userspace is now done with the EP so we can release the ref
+ * iscsi_cleanup_conn_work_fn took.
+ */
+ iscsi_put_endpoint(ep);
+ mutex_lock(&conn->ep_mutex);
+ }
+}
+
static int iscsi_if_stop_conn(struct iscsi_transport *transport,
struct iscsi_uevent *ev)
{
@@ -2238,11 +2280,24 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
iscsi_stop_conn(conn, flag);
} else {
/*
+ * For offload, when iscsid is restarted it won't know about
+ * existing endpoints so it can't do an ep_disconnect. We clean
+ * it up here for userspace.
+ */
+ mutex_lock(&conn->ep_mutex);
+ if (conn->ep)
+ iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
+ mutex_unlock(&conn->ep_mutex);
+
+ /*
* Figure out if it was the kernel or userspace initiating this.
*/
+ spin_lock_irq(&conn->lock);
if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
iscsi_stop_conn(conn, flag);
} else {
+ spin_unlock_irq(&conn->lock);
ISCSI_DBG_TRANS_CONN(conn,
"flush kernel conn cleanup.\n");
flush_work(&conn->cleanup_work);
@@ -2251,31 +2306,14 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
* Only clear for recovery to avoid extra cleanup runs during
* termination.
*/
+ spin_lock_irq(&conn->lock);
clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
+ spin_unlock_irq(&conn->lock);
}
ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
return 0;
}
-static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
-{
- struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
- struct iscsi_endpoint *ep;
-
- ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
- conn->state = ISCSI_CONN_FAILED;
-
- if (!conn->ep || !session->transport->ep_disconnect)
- return;
-
- ep = conn->ep;
- conn->ep = NULL;
-
- session->transport->unbind_conn(conn, is_active);
- session->transport->ep_disconnect(ep);
- ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
-}
-
static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
{
struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
@@ -2284,18 +2322,11 @@ static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
mutex_lock(&conn->ep_mutex);
/*
- * If we are not at least bound there is nothing for us to do. Userspace
- * will do a ep_disconnect call if offload is used, but will not be
- * doing a stop since there is nothing to clean up, so we have to clear
- * the cleanup bit here.
+ * Get a ref to the ep, so we don't release its ID until after
+ * userspace is done referencing it in iscsi_if_disconnect_bound_ep.
*/
- if (conn->state != ISCSI_CONN_BOUND && conn->state != ISCSI_CONN_UP) {
- ISCSI_DBG_TRANS_CONN(conn, "Got error while conn is already failed. Ignoring.\n");
- clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
- mutex_unlock(&conn->ep_mutex);
- return;
- }
-
+ if (conn->ep)
+ get_device(&conn->ep->dev);
iscsi_ep_disconnect(conn, false);
if (system_state != SYSTEM_RUNNING) {
@@ -2340,11 +2371,12 @@ iscsi_alloc_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
conn->dd_data = &conn[1];
mutex_init(&conn->ep_mutex);
+ spin_lock_init(&conn->lock);
INIT_LIST_HEAD(&conn->conn_list);
INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
conn->transport = transport;
conn->cid = cid;
- conn->state = ISCSI_CONN_DOWN;
+ WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
/* this is released in the dev's release function */
if (!get_device(&session->dev))
@@ -2542,9 +2574,32 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
struct iscsi_uevent *ev;
struct iscsi_internal *priv;
int len = nlmsg_total_size(sizeof(*ev));
+ unsigned long flags;
+ int state;
- if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags))
- queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work);
+ spin_lock_irqsave(&conn->lock, flags);
+ /*
+ * Userspace will only do a stop call if we are at least bound. And, we
+ * only need to do the in kernel cleanup if in the UP state so cmds can
+ * be released to upper layers. If in other states just wait for
+ * userspace to avoid races that can leave the cleanup_work queued.
+ */
+ state = READ_ONCE(conn->state);
+ switch (state) {
+ case ISCSI_CONN_BOUND:
+ case ISCSI_CONN_UP:
+ if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP,
+ &conn->flags)) {
+ queue_work(iscsi_conn_cleanup_workq,
+ &conn->cleanup_work);
+ }
+ break;
+ default:
+ ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n",
+ state);
+ break;
+ }
+ spin_unlock_irqrestore(&conn->lock, flags);
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
@@ -2894,7 +2949,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
char *data = (char*)ev + sizeof(*ev);
struct iscsi_cls_conn *conn;
struct iscsi_cls_session *session;
- int err = 0, value = 0;
+ int err = 0, value = 0, state;
if (ev->u.set_param.len > PAGE_SIZE)
return -EINVAL;
@@ -2911,8 +2966,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
session->recovery_tmo = value;
break;
default:
- if ((conn->state == ISCSI_CONN_BOUND) ||
- (conn->state == ISCSI_CONN_UP)) {
+ state = READ_ONCE(conn->state);
+ if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) {
err = transport->set_param(conn, ev->u.set_param.param,
data, ev->u.set_param.len);
} else {
@@ -2984,16 +3039,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
}
mutex_lock(&conn->ep_mutex);
- /* Check if this was a conn error and the kernel took ownership */
- if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
- ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
- mutex_unlock(&conn->ep_mutex);
-
- flush_work(&conn->cleanup_work);
- goto put_ep;
- }
-
- iscsi_ep_disconnect(conn, false);
+ iscsi_if_disconnect_bound_ep(conn, ep, false);
mutex_unlock(&conn->ep_mutex);
put_ep:
iscsi_put_endpoint(ep);
@@ -3696,24 +3742,17 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
return -EINVAL;
mutex_lock(&conn->ep_mutex);
+ spin_lock_irq(&conn->lock);
if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+ spin_unlock_irq(&conn->lock);
mutex_unlock(&conn->ep_mutex);
ev->r.retcode = -ENOTCONN;
return 0;
}
+ spin_unlock_irq(&conn->lock);
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_BIND_CONN:
- if (conn->ep) {
- /*
- * For offload boot support where iscsid is restarted
- * during the pivot root stage, the ep will be intact
- * here when the new iscsid instance starts up and
- * reconnects.
- */
- iscsi_ep_disconnect(conn, true);
- }
-
session = iscsi_session_lookup(ev->u.b_conn.sid);
if (!session) {
err = -EINVAL;
@@ -3724,7 +3763,7 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
ev->u.b_conn.transport_eph,
ev->u.b_conn.is_leading);
if (!ev->r.retcode)
- conn->state = ISCSI_CONN_BOUND;
+ WRITE_ONCE(conn->state, ISCSI_CONN_BOUND);
if (ev->r.retcode || !transport->ep_connect)
break;
@@ -3743,7 +3782,8 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
case ISCSI_UEVENT_START_CONN:
ev->r.retcode = transport->start_conn(conn);
if (!ev->r.retcode)
- conn->state = ISCSI_CONN_UP;
+ WRITE_ONCE(conn->state, ISCSI_CONN_UP);
+
break;
case ISCSI_UEVENT_SEND_PDU:
pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
@@ -4050,10 +4090,11 @@ static ssize_t show_conn_state(struct device *dev,
{
struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);
const char *state = "unknown";
+ int conn_state = READ_ONCE(conn->state);
- if (conn->state >= 0 &&
- conn->state < ARRAY_SIZE(connection_state_names))
- state = connection_state_names[conn->state];
+ if (conn_state >= 0 &&
+ conn_state < ARRAY_SIZE(connection_state_names))
+ state = connection_state_names[conn_state];
return sysfs_emit(buf, "%s\n", state);
}
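
The endpoint-ID rework replaces an O(n) class_find_device() scan with IDR allocation under a mutex, and the new lookup takes a device reference before returning so the endpoint cannot be released out from under the caller. A condensed kernel-style sketch of the allocate/lookup/remove lifecycle (refcounting elided):

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_IDR(ep_idr);
static DEFINE_MUTEX(ep_idr_mutex);

struct endpoint { int id; };

static struct endpoint *ep_create(void)
{
	struct endpoint *ep = kzalloc(sizeof(*ep), GFP_KERNEL);

	if (!ep)
		return NULL;

	mutex_lock(&ep_idr_mutex);
	ep->id = idr_alloc(&ep_idr, ep, 0, -1, GFP_NOIO);
	mutex_unlock(&ep_idr_mutex);
	if (ep->id < 0) {
		kfree(ep);
		return NULL;
	}
	return ep;
}

static struct endpoint *ep_lookup(int id)
{
	struct endpoint *ep;

	mutex_lock(&ep_idr_mutex);
	ep = idr_find(&ep_idr, id);    /* real code also grabs a ref here */
	mutex_unlock(&ep_idr_mutex);
	return ep;
}

static void ep_destroy(struct endpoint *ep)
{
	mutex_lock(&ep_idr_mutex);
	idr_remove(&ep_idr, ep->id);
	mutex_unlock(&ep_idr_mutex);
	kfree(ep);
}
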
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index ddd00efc4882..fbdb5124d7f7 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -41,7 +41,7 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
int result;
unsigned char *buffer;
- buffer = kmalloc(32, GFP_KERNEL);
+ buffer = kzalloc(32, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -55,10 +55,13 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
cgc.data_direction = DMA_FROM_DEVICE;
result = sr_do_ioctl(cd, &cgc);
+ if (result)
+ goto err;
tochdr->cdth_trk0 = buffer[2];
tochdr->cdth_trk1 = buffer[3];
+err:
kfree(buffer);
return result;
}
@@ -71,7 +74,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
int result;
unsigned char *buffer;
- buffer = kmalloc(32, GFP_KERNEL);
+ buffer = kzalloc(32, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -86,6 +89,8 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
cgc.data_direction = DMA_FROM_DEVICE;
result = sr_do_ioctl(cd, &cgc);
+ if (result)
+ goto err;
tocentry->cdte_ctrl = buffer[5] & 0xf;
tocentry->cdte_adr = buffer[5] >> 4;
@@ -98,6 +103,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
+ buffer[10]) << 8) + buffer[11];
+err:
kfree(buffer);
return result;
}
@@ -384,7 +390,7 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
{
Scsi_CD *cd = cdi->handle;
struct packet_command cgc;
- char *buffer = kmalloc(32, GFP_KERNEL);
+ char *buffer = kzalloc(32, GFP_KERNEL);
int result;
if (!buffer)
@@ -400,10 +406,13 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
cgc.data_direction = DMA_FROM_DEVICE;
cgc.timeout = IOCTL_TIMEOUT;
result = sr_do_ioctl(cd, &cgc);
+ if (result)
+ goto err;
memcpy(mcn->medium_catalog_number, buffer + 9, 13);
mcn->medium_catalog_number[13] = 0;
+err:
kfree(buffer);
return result;
}
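
All four sr_ioctl hunks apply the same hardening: allocate the transfer buffer zeroed (kzalloc) and jump past the parsing when the command fails, so stale or uninitialized bytes are never read back as device data. The shape of the fix, with a hypothetical do_command() in place of sr_do_ioctl():

#include <linux/errno.h>
#include <linux/slab.h>

int do_command(unsigned char *buf);      /* hypothetical command helper */

static int read_header(unsigned char *trk0, unsigned char *trk1)
{
	unsigned char *buffer;
	int result;

	buffer = kzalloc(32, GFP_KERNEL);    /* zeroed, unlike kmalloc */
	if (!buffer)
		return -ENOMEM;

	result = do_command(buffer);
	if (result)
		goto err;                    /* don't parse a failed read */

	*trk0 = buffer[2];
	*trk1 = buffer[3];
err:
	kfree(buffer);
	return result;
}
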
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index 122f9c884b38..ccd0577a771e 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -50,7 +50,7 @@ struct imx8m_blk_ctrl_domain_data {
u32 mipi_phy_rst_mask;
};
-#define DOMAIN_MAX_CLKS 3
+#define DOMAIN_MAX_CLKS 4
struct imx8m_blk_ctrl_domain {
struct generic_pm_domain genpd;
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 92d9610df1fd..938017a60c8e 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -277,6 +277,9 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
static bool atmel_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
if (atmel_qspi_find_mode(op) < 0)
return false;
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 616ada891974..19686fb47bb3 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1415,9 +1415,24 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem,
all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
!op->data.dtr;
- /* Mixed DTR modes not supported. */
- if (!(all_true || all_false))
+ if (all_true) {
+ /* Right now we only support 8-8-8 DTR mode. */
+ if (op->cmd.nbytes && op->cmd.buswidth != 8)
+ return false;
+ if (op->addr.nbytes && op->addr.buswidth != 8)
+ return false;
+ if (op->data.nbytes && op->data.buswidth != 8)
+ return false;
+ } else if (all_false) {
+ /* Only 1-1-X ops are supported without DTR */
+ if (op->cmd.nbytes && op->cmd.buswidth > 1)
+ return false;
+ if (op->addr.nbytes && op->addr.buswidth > 1)
+ return false;
+ } else {
+ /* Mixed DTR modes are not supported. */
return false;
+ }
return spi_mem_default_supports_op(mem, op);
}
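
The widened cqspi check is effectively a two-branch whitelist: all op phases must agree on DTR, and within each branch only specific bus widths pass (8-8-8 when DTR, 1-1-X otherwise). A self-contained sketch of the decision table, using a reduced op structure that mirrors the spi-mem layout:

#include <stdbool.h>
#include <stdint.h>

struct phase { uint8_t nbytes; uint8_t buswidth; bool dtr; };
struct mem_op { struct phase cmd, addr, dummy, data; };

static bool supports_op(const struct mem_op *op)
{
	bool all_true = op->cmd.dtr && op->addr.dtr &&
			op->dummy.dtr && op->data.dtr;
	bool all_false = !op->cmd.dtr && !op->addr.dtr &&
			 !op->dummy.dtr && !op->data.dtr;

	if (all_true) {
		/* DTR: only 8-8-8 is supported. */
		if (op->cmd.nbytes && op->cmd.buswidth != 8)
			return false;
		if (op->addr.nbytes && op->addr.buswidth != 8)
			return false;
		if (op->data.nbytes && op->data.buswidth != 8)
			return false;
	} else if (all_false) {
		/* STR: only 1-1-X is supported. */
		if (op->cmd.nbytes && op->cmd.buswidth > 1)
			return false;
		if (op->addr.nbytes && op->addr.buswidth > 1)
			return false;
	} else {
		return false;    /* mixed DTR phases are not supported */
	}
	return true;
}
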
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index a5ef7a526a7f..f6eec7a869b6 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -72,6 +72,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
index 94fb09696677..d167699a1a96 100644
--- a/drivers/spi/spi-mtk-nor.c
+++ b/drivers/spi/spi-mtk-nor.c
@@ -960,7 +960,17 @@ static int __maybe_unused mtk_nor_suspend(struct device *dev)
static int __maybe_unused mtk_nor_resume(struct device *dev)
{
- return pm_runtime_force_resume(dev);
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ mtk_nor_init(sp);
+
+ return 0;
}
static const struct dev_pm_ops mtk_nor_pm_ops = {
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ff292b75e23f..60dafe4c581b 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -588,7 +588,7 @@ static void pscsi_destroy_device(struct se_device *dev)
}
static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
- unsigned char *req_sense)
+ unsigned char *req_sense, int valid_data)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct scsi_device *sd = pdv->pdv_sd;
@@ -681,7 +681,7 @@ after_mode_select:
* back despite framework assumption that a
* check condition means there is no data
*/
- if (sd->type == TYPE_TAPE &&
+ if (sd->type == TYPE_TAPE && valid_data &&
cmd->data_direction == DMA_FROM_DEVICE) {
/*
* is sense data valid, fixed format,
@@ -1032,6 +1032,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
struct se_cmd *cmd = req->end_io_data;
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
enum sam_status scsi_status = scmd->result & 0xff;
+ int valid_data = cmd->data_length - scmd->resid_len;
u8 *cdb = cmd->priv;
if (scsi_status != SAM_STAT_GOOD) {
@@ -1039,12 +1040,11 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
" 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
}
- pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer);
+ pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer, valid_data);
switch (host_byte(scmd->result)) {
case DID_OK:
- target_complete_cmd_with_length(cmd, scsi_status,
- cmd->data_length - scmd->resid_len);
+ target_complete_cmd_with_length(cmd, scsi_status, valid_data);
break;
default:
pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index a5eb4ef46971..c9b3b2cfb2b2 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -865,6 +865,7 @@ err_rhashtable_free:
rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
+ mutex_destroy(&optee->ffa.mutex);
err_unreg_supp_teedev:
tee_device_unregister(optee->supp_teedev);
err_unreg_teedev:
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index e37691e0bf20..0e5cc948373c 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -113,8 +113,10 @@ config THERMAL_DEFAULT_GOV_USER_SPACE
bool "user_space"
select THERMAL_GOV_USER_SPACE
help
- Select this if you want to let the user space manage the
- platform thermals.
+ The user space governor lets user space receive trip point
+ crossed notifications from the kernel via uevents. It is
+ recommended to use the netlink interface instead, which gives
+ richer information about thermal framework events.
config THERMAL_DEFAULT_GOV_POWER_ALLOCATOR
bool "power_allocator"
diff --git a/drivers/thermal/gov_user_space.c b/drivers/thermal/gov_user_space.c
index 64a18e354a20..a62a4e90bd3f 100644
--- a/drivers/thermal/gov_user_space.c
+++ b/drivers/thermal/gov_user_space.c
@@ -17,8 +17,7 @@
static int user_space_bind(struct thermal_zone_device *tz)
{
- pr_warn_once("Userspace governor deprecated: use thermal netlink " \
- "notification instead\n");
+ pr_info_once("Consider using thermal netlink events interface\n");
return 0;
}
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 4954800b9850..d97f496bab9b 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -68,7 +68,7 @@ static int evaluate_odvp(struct int3400_thermal_priv *priv);
struct odvp_attr {
int odvp;
struct int3400_thermal_priv *priv;
- struct kobj_attribute attr;
+ struct device_attribute attr;
};
static ssize_t data_vault_read(struct file *file, struct kobject *kobj,
@@ -311,7 +311,7 @@ end:
return result;
}
-static ssize_t odvp_show(struct kobject *kobj, struct kobj_attribute *attr,
+static ssize_t odvp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct odvp_attr *odvp_attr;
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index f154bada2906..1c4aac8464a7 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -610,9 +610,6 @@ cur_state_store(struct device *dev, struct device_attribute *attr,
unsigned long state;
int result;
- dev_warn_once(&cdev->device,
- "Setting cooling device state is deprecated\n");
-
if (sscanf(buf, "%ld\n", &state) != 1)
return -EINVAL;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index fa92f727fdf8..a38b922bcbc1 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -73,6 +73,8 @@ module_param(debug, int, 0600);
*/
#define MAX_MRU 1500
#define MAX_MTU 1500
+/* SOF, ADDR, CTRL, LEN1, LEN2, ..., FCS, EOF */
+#define PROT_OVERHEAD 7
#define GSM_NET_TX_TIMEOUT (HZ*10)
/*
@@ -219,7 +221,6 @@ struct gsm_mux {
int encoding;
u8 control;
u8 fcs;
- u8 received_fcs;
u8 *txframe; /* TX framing buffer */
/* Method for the receiver side */
@@ -231,6 +232,7 @@ struct gsm_mux {
int initiator; /* Did we initiate connection */
bool dead; /* Has the mux been shut down */
struct gsm_dlci *dlci[NUM_DLCI];
+ int old_c_iflag; /* termios c_iflag value before attach */
bool constipated; /* Asked by remote to shut up */
spinlock_t tx_lock;
@@ -271,10 +273,6 @@ static DEFINE_SPINLOCK(gsm_mux_lock);
static struct tty_driver *gsm_tty_driver;
-/* Save dlci open address */
-static int addr_open[256] = { 0 };
-/* Save dlci open count */
-static int addr_cnt;
/*
* This section of the driver logic implements the GSM encodings
* both the basic and the 'advanced'. Reliable transport is not
@@ -369,6 +367,7 @@ static const u8 gsm_fcs8[256] = {
#define GOOD_FCS 0xCF
static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len);
+static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk);
/**
* gsm_fcs_add - update FCS
@@ -832,7 +831,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
break;
case 2: /* Unstructured with modem bits.
Always one byte as we never send inline break data */
- *dp++ = gsm_encode_modem(dlci);
+ *dp++ = (gsm_encode_modem(dlci) << 1) | EA;
break;
}
WARN_ON(kfifo_out_locked(&dlci->fifo, dp , len, &dlci->lock) != len);
@@ -917,6 +916,66 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
}
/**
+ * gsm_dlci_modem_output - try and push modem status out of a DLCI
+ * @gsm: mux
+ * @dlci: the DLCI to pull modem status from
+ * @brk: break signal
+ *
+ * Push an empty frame in to the transmit queue to update the modem status
+ * bits and to transmit an optional break.
+ *
+ * Caller must hold the tx_lock of the mux.
+ */
+
+static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci,
+ u8 brk)
+{
+ u8 *dp = NULL;
+ struct gsm_msg *msg;
+ int size = 0;
+
+ /* for modem bits without break data */
+ switch (dlci->adaption) {
+ case 1: /* Unstructured */
+ break;
+ case 2: /* Unstructured with modem bits. */
+ size++;
+ if (brk > 0)
+ size++;
+ break;
+ default:
+ pr_err("%s: unsupported adaption %d\n", __func__,
+ dlci->adaption);
+ return -EINVAL;
+ }
+
+ msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype);
+ if (!msg) {
+ pr_err("%s: gsm_data_alloc error", __func__);
+ return -ENOMEM;
+ }
+ dp = msg->data;
+ switch (dlci->adaption) {
+ case 1: /* Unstructured */
+ break;
+ case 2: /* Unstructured with modem bits. */
+ if (brk == 0) {
+ *dp++ = (gsm_encode_modem(dlci) << 1) | EA;
+ } else {
+ *dp++ = gsm_encode_modem(dlci) << 1;
+ *dp++ = (brk << 4) | 2 | EA; /* Length, Break, EA */
+ }
+ break;
+ default:
+ /* Handled above */
+ break;
+ }
+
+ __gsm_data_queue(dlci, msg);
+ return size;
+}
+
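The kerneldoc above describes the empty frame that convergence layer type 2 uses to carry modem state. The value octet uses the same layout as gsm_dlci_data_output(): the modem bits shifted up one position with the EA (extension) bit set in bit 0 to mark the final octet. A minimal sketch of that encoding, assuming the driver's EA definition of 0x01 (the helper name is hypothetical):

static unsigned char encode_modem_octet(unsigned char modem_bits)
{
	/* seven value bits, EA set: this is the last octet of the field */
	return (unsigned char)((modem_bits << 1) | 0x01);
}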
+/**
* gsm_dlci_data_sweep - look for data to send
* @gsm: the GSM mux
*
@@ -1093,7 +1152,6 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
{
unsigned int addr = 0;
unsigned int modem = 0;
- unsigned int brk = 0;
struct gsm_dlci *dlci;
int len = clen;
int slen;
@@ -1123,17 +1181,8 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
return;
}
len--;
- if (len > 0) {
- while (gsm_read_ea(&brk, *dp++) == 0) {
- len--;
- if (len == 0)
- return;
- }
- modem <<= 7;
- modem |= (brk & 0x7f);
- }
tty = tty_port_tty_get(&dlci->port);
- gsm_process_modem(tty, dlci, modem, slen);
+ gsm_process_modem(tty, dlci, modem, slen - len);
if (tty) {
tty_wakeup(tty);
tty_kref_put(tty);
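The loop removed above consumed an optional field using the driver's EA continuation scheme: each octet contributes seven value bits, and a set EA bit (bit 0) terminates the field. A sketch mirroring what gsm_read_ea() does, for reference (treat the exact signature as an assumption):

static int read_ea(unsigned int *val, unsigned char c)
{
	*val <<= 7;		/* make room for seven more value bits */
	*val |= c >> 1;		/* append them below the running value */
	return c & 0x01;	/* EA set means this was the last octet */
}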
@@ -1193,7 +1242,6 @@ static void gsm_control_rls(struct gsm_mux *gsm, const u8 *data, int clen)
}
static void gsm_dlci_begin_close(struct gsm_dlci *dlci);
-static void gsm_dlci_close(struct gsm_dlci *dlci);
/**
* gsm_control_message - DLCI 0 control processing
@@ -1212,28 +1260,15 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
{
u8 buf[1];
unsigned long flags;
- struct gsm_dlci *dlci;
- int i;
- int address;
switch (command) {
case CMD_CLD: {
- if (addr_cnt > 0) {
- for (i = 0; i < addr_cnt; i++) {
- address = addr_open[i];
- dlci = gsm->dlci[address];
- gsm_dlci_close(dlci);
- addr_open[i] = 0;
- }
- }
+ struct gsm_dlci *dlci = gsm->dlci[0];
/* Modem wishes to close down */
- dlci = gsm->dlci[0];
if (dlci) {
dlci->dead = true;
gsm->dead = true;
- gsm_dlci_close(dlci);
- addr_cnt = 0;
- gsm_response(gsm, 0, UA|PF);
+ gsm_dlci_begin_close(dlci);
}
}
break;
@@ -1326,11 +1361,12 @@ static void gsm_control_response(struct gsm_mux *gsm, unsigned int command,
static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
{
- struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 1, gsm->ftype);
+ struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 2, gsm->ftype);
if (msg == NULL)
return;
- msg->data[0] = (ctrl->cmd << 1) | 2 | EA; /* command */
- memcpy(msg->data + 1, ctrl->data, ctrl->len);
+ msg->data[0] = (ctrl->cmd << 1) | CR | EA; /* command */
+ msg->data[1] = (ctrl->len << 1) | EA;
+ memcpy(msg->data + 2, ctrl->data, ctrl->len);
gsm_data_queue(gsm->dlci[0], msg);
}
@@ -1353,7 +1389,6 @@ static void gsm_control_retransmit(struct timer_list *t)
spin_lock_irqsave(&gsm->control_lock, flags);
ctrl = gsm->pending_cmd;
if (ctrl) {
- gsm->cretries--;
if (gsm->cretries == 0) {
gsm->pending_cmd = NULL;
ctrl->error = -ETIMEDOUT;
@@ -1362,6 +1397,7 @@ static void gsm_control_retransmit(struct timer_list *t)
wake_up(&gsm->event);
return;
}
+ gsm->cretries--;
gsm_control_transmit(gsm, ctrl);
mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
}
@@ -1402,7 +1438,7 @@ retry:
/* If DLCI0 is in ADM mode skip retries, it won't respond */
if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
- gsm->cretries = 1;
+ gsm->cretries = 0;
else
gsm->cretries = gsm->n2;
@@ -1450,20 +1486,22 @@ static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
static void gsm_dlci_close(struct gsm_dlci *dlci)
{
+ unsigned long flags;
+
del_timer(&dlci->t1);
if (debug & 8)
pr_debug("DLCI %d goes closed.\n", dlci->addr);
dlci->state = DLCI_CLOSED;
if (dlci->addr != 0) {
tty_port_tty_hangup(&dlci->port, false);
+ spin_lock_irqsave(&dlci->lock, flags);
kfifo_reset(&dlci->fifo);
+ spin_unlock_irqrestore(&dlci->lock, flags);
/* Ensure that gsmtty_open() can return. */
tty_port_set_initialized(&dlci->port, 0);
wake_up_interruptible(&dlci->port.open_wait);
} else
dlci->gsm->dead = true;
- /* Unregister gsmtty driver,report gsmtty dev remove uevent for user */
- tty_unregister_device(gsm_tty_driver, dlci->addr);
wake_up(&dlci->gsm->event);
/* A DLCI 0 close is a MUX termination so we need to kick that
back to userspace somehow */
@@ -1485,8 +1523,9 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
dlci->state = DLCI_OPEN;
if (debug & 8)
pr_debug("DLCI %d goes open.\n", dlci->addr);
- /* Register gsmtty driver,report gsmtty dev add uevent for user */
- tty_register_device(gsm_tty_driver, dlci->addr, NULL);
+ /* Send current modem state */
+ if (dlci->addr)
+ gsm_modem_update(dlci, 0);
wake_up(&dlci->gsm->event);
}
@@ -1623,6 +1662,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
tty = tty_port_tty_get(port);
if (tty) {
gsm_process_modem(tty, dlci, modem, slen);
+ tty_wakeup(tty);
tty_kref_put(tty);
}
fallthrough;
@@ -1793,19 +1833,7 @@ static void gsm_queue(struct gsm_mux *gsm)
struct gsm_dlci *dlci;
u8 cr;
int address;
- int i, j, k, address_tmp;
- /* We have to sneak a look at the packet body to do the FCS.
- A somewhat layering violation in the spec */
- if ((gsm->control & ~PF) == UI)
- gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
- if (gsm->encoding == 0) {
- /* WARNING: gsm->received_fcs is used for
- gsm->encoding = 0 only.
- In this case it contain the last piece of data
- required to generate final CRC */
- gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs);
- }
if (gsm->fcs != GOOD_FCS) {
gsm->bad_fcs++;
if (debug & 4)
@@ -1836,11 +1864,6 @@ static void gsm_queue(struct gsm_mux *gsm)
else {
gsm_response(gsm, address, UA|PF);
gsm_dlci_open(dlci);
- /* Save dlci open address */
- if (address) {
- addr_open[addr_cnt] = address;
- addr_cnt++;
- }
}
break;
case DISC|PF:
@@ -1851,35 +1874,9 @@ static void gsm_queue(struct gsm_mux *gsm)
return;
}
/* Real close complete */
- if (!address) {
- if (addr_cnt > 0) {
- for (i = 0; i < addr_cnt; i++) {
- address = addr_open[i];
- dlci = gsm->dlci[address];
- gsm_dlci_close(dlci);
- addr_open[i] = 0;
- }
- }
- dlci = gsm->dlci[0];
- gsm_dlci_close(dlci);
- addr_cnt = 0;
- gsm_response(gsm, 0, UA|PF);
- } else {
- gsm_response(gsm, address, UA|PF);
- gsm_dlci_close(dlci);
- /* clear dlci address */
- for (j = 0; j < addr_cnt; j++) {
- address_tmp = addr_open[j];
- if (address_tmp == address) {
- for (k = j; k < addr_cnt; k++)
- addr_open[k] = addr_open[k+1];
- addr_cnt--;
- break;
- }
- }
- }
+ gsm_response(gsm, address, UA|PF);
+ gsm_dlci_close(dlci);
break;
- case UA:
case UA|PF:
if (cr == 0 || dlci == NULL)
break;
@@ -1993,19 +1990,25 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
break;
case GSM_DATA: /* Data */
gsm->buf[gsm->count++] = c;
- if (gsm->count == gsm->len)
+ if (gsm->count == gsm->len) {
+ /* Calculate final FCS for UI frames over all data */
+ if ((gsm->control & ~PF) != UIH) {
+ gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf,
+ gsm->count);
+ }
gsm->state = GSM_FCS;
+ }
break;
case GSM_FCS: /* FCS follows the packet */
- gsm->received_fcs = c;
- gsm_queue(gsm);
+ gsm->fcs = gsm_fcs_add(gsm->fcs, c);
gsm->state = GSM_SSOF;
break;
case GSM_SSOF:
- if (c == GSM0_SOF) {
- gsm->state = GSM_SEARCH;
- break;
- }
+ gsm->state = GSM_SEARCH;
+ if (c == GSM0_SOF)
+ gsm_queue(gsm);
+ else
+ gsm->bad_size++;
break;
default:
pr_debug("%s: unhandled state: %d\n", __func__, gsm->state);
@@ -2023,12 +2026,35 @@ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c)
static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
{
+ /* handle XON/XOFF */
+ if ((c & ISO_IEC_646_MASK) == XON) {
+ gsm->constipated = true;
+ return;
+ } else if ((c & ISO_IEC_646_MASK) == XOFF) {
+ gsm->constipated = false;
+ /* Kick the link in case it is idling */
+ gsm_data_kick(gsm, NULL);
+ return;
+ }
if (c == GSM1_SOF) {
- /* EOF is only valid in frame if we have got to the data state
- and received at least one byte (the FCS) */
- if (gsm->state == GSM_DATA && gsm->count) {
- /* Extract the FCS */
+ /* EOF is only valid in frame if we have got to the data state */
+ if (gsm->state == GSM_DATA) {
+ if (gsm->count < 1) {
+ /* Missing FCS */
+ gsm->malformed++;
+ gsm->state = GSM_START;
+ return;
+ }
+ /* Remove the FCS from data */
gsm->count--;
+ if ((gsm->control & ~PF) != UIH) {
+ /* Calculate final FCS for UI frames over all
+ * data but FCS
+ */
+ gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf,
+ gsm->count);
+ }
+ /* Add the FCS itself to test against GOOD_FCS */
gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]);
gsm->len = gsm->count;
gsm_queue(gsm);
@@ -2037,7 +2063,8 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
}
/* Any partial frame was a runt so go back to start */
if (gsm->state != GSM_START) {
- gsm->malformed++;
+ if (gsm->state != GSM_SEARCH)
+ gsm->malformed++;
gsm->state = GSM_START;
}
/* A SOF in GSM_START means we are still reading idling or
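With the rework above, both receive paths fold FCS handling into their state machines: the running FCS is computed over the frame body (for UI frames it covers all data), the received FCS octet is then folded in as well, and a valid frame leaves the fixed residue GOOD_FCS checked in gsm_queue(). A sketch of that check using the helpers and constants named in the hunks (INIT_FCS is the driver's 0xFF seed; the exact signatures are an assumption):

static bool frame_fcs_ok(unsigned char *buf, int len, unsigned char received_fcs)
{
	unsigned char fcs = gsm_fcs_add_block(INIT_FCS, buf, len);

	/* folding the received FCS into a valid CRC yields GOOD_FCS */
	return gsm_fcs_add(fcs, received_fcs) == GOOD_FCS;
}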
@@ -2106,74 +2133,43 @@ static void gsm_error(struct gsm_mux *gsm)
gsm->io_error++;
}
-static int gsm_disconnect(struct gsm_mux *gsm)
-{
- struct gsm_dlci *dlci = gsm->dlci[0];
- struct gsm_control *gc;
-
- if (!dlci)
- return 0;
-
- /* In theory disconnecting DLCI 0 is sufficient but for some
- modems this is apparently not the case. */
- gc = gsm_control_send(gsm, CMD_CLD, NULL, 0);
- if (gc)
- gsm_control_wait(gsm, gc);
-
- del_timer_sync(&gsm->t2_timer);
- /* Now we are sure T2 has stopped */
-
- gsm_dlci_begin_close(dlci);
- wait_event_interruptible(gsm->event,
- dlci->state == DLCI_CLOSED);
-
- if (signal_pending(current))
- return -EINTR;
-
- return 0;
-}
-
/**
* gsm_cleanup_mux - generic GSM protocol cleanup
* @gsm: our mux
+ * @disc: disconnect link?
*
* Clean up the bits of the mux which are the same for all framing
* protocols. Remove the mux from the mux table, stop all the timers
* and then shut down each device hanging up the channels as we go.
*/
-static void gsm_cleanup_mux(struct gsm_mux *gsm)
+static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
{
int i;
struct gsm_dlci *dlci = gsm->dlci[0];
struct gsm_msg *txq, *ntxq;
gsm->dead = true;
+ mutex_lock(&gsm->mutex);
- spin_lock(&gsm_mux_lock);
- for (i = 0; i < MAX_MUX; i++) {
- if (gsm_mux[i] == gsm) {
- gsm_mux[i] = NULL;
- break;
+ if (dlci) {
+ if (disc && dlci->state != DLCI_CLOSED) {
+ gsm_dlci_begin_close(dlci);
+ wait_event(gsm->event, dlci->state == DLCI_CLOSED);
}
+ dlci->dead = true;
}
- spin_unlock(&gsm_mux_lock);
- /* open failed before registering => nothing to do */
- if (i == MAX_MUX)
- return;
+ /* Finish outstanding timers, making sure they are done */
del_timer_sync(&gsm->t2_timer);
- /* Now we are sure T2 has stopped */
- if (dlci)
- dlci->dead = true;
- /* Free up any link layer users */
- mutex_lock(&gsm->mutex);
- for (i = 0; i < NUM_DLCI; i++)
+ /* Free up any link layer users and finally the control channel */
+ for (i = NUM_DLCI - 1; i >= 0; i--)
if (gsm->dlci[i])
gsm_dlci_release(gsm->dlci[i]);
mutex_unlock(&gsm->mutex);
/* Now wipe the queues */
+ tty_ldisc_flush(gsm->tty);
list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list)
kfree(txq);
INIT_LIST_HEAD(&gsm->tx_list);
@@ -2191,7 +2187,6 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm)
static int gsm_activate_mux(struct gsm_mux *gsm)
{
struct gsm_dlci *dlci;
- int i = 0;
timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
init_waitqueue_head(&gsm->event);
@@ -2203,18 +2198,6 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
else
gsm->receive = gsm1_receive;
- spin_lock(&gsm_mux_lock);
- for (i = 0; i < MAX_MUX; i++) {
- if (gsm_mux[i] == NULL) {
- gsm->num = i;
- gsm_mux[i] = gsm;
- break;
- }
- }
- spin_unlock(&gsm_mux_lock);
- if (i == MAX_MUX)
- return -EBUSY;
-
dlci = gsm_dlci_alloc(gsm, 0);
if (dlci == NULL)
return -ENOMEM;
@@ -2230,6 +2213,15 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
*/
static void gsm_free_mux(struct gsm_mux *gsm)
{
+ int i;
+
+ for (i = 0; i < MAX_MUX; i++) {
+ if (gsm == gsm_mux[i]) {
+ gsm_mux[i] = NULL;
+ break;
+ }
+ }
+ mutex_destroy(&gsm->mutex);
kfree(gsm->txframe);
kfree(gsm->buf);
kfree(gsm);
@@ -2249,12 +2241,20 @@ static void gsm_free_muxr(struct kref *ref)
static inline void mux_get(struct gsm_mux *gsm)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsm_mux_lock, flags);
kref_get(&gsm->ref);
+ spin_unlock_irqrestore(&gsm_mux_lock, flags);
}
static inline void mux_put(struct gsm_mux *gsm)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsm_mux_lock, flags);
kref_put(&gsm->ref, gsm_free_muxr);
+ spin_unlock_irqrestore(&gsm_mux_lock, flags);
}
static inline unsigned int mux_num_to_base(struct gsm_mux *gsm)
@@ -2275,6 +2275,7 @@ static inline unsigned int mux_line_to_num(unsigned int line)
static struct gsm_mux *gsm_alloc_mux(void)
{
+ int i;
struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL);
if (gsm == NULL)
return NULL;
@@ -2283,7 +2284,7 @@ static struct gsm_mux *gsm_alloc_mux(void)
kfree(gsm);
return NULL;
}
- gsm->txframe = kmalloc(2 * MAX_MRU + 2, GFP_KERNEL);
+ gsm->txframe = kmalloc(2 * (MAX_MTU + PROT_OVERHEAD - 1), GFP_KERNEL);
if (gsm->txframe == NULL) {
kfree(gsm->buf);
kfree(gsm);
@@ -2304,6 +2305,26 @@ static struct gsm_mux *gsm_alloc_mux(void)
gsm->mtu = 64;
gsm->dead = true; /* Avoid early tty opens */
+ /* Store the instance in the mux array or abort if no space is
+ * available.
+ */
+ spin_lock(&gsm_mux_lock);
+ for (i = 0; i < MAX_MUX; i++) {
+ if (!gsm_mux[i]) {
+ gsm_mux[i] = gsm;
+ gsm->num = i;
+ break;
+ }
+ }
+ spin_unlock(&gsm_mux_lock);
+ if (i == MAX_MUX) {
+ mutex_destroy(&gsm->mutex);
+ kfree(gsm->txframe);
+ kfree(gsm->buf);
+ kfree(gsm);
+ return NULL;
+ }
+
return gsm;
}
@@ -2339,7 +2360,7 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
/* Check the MRU/MTU range looks sane */
if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8)
return -EINVAL;
- if (c->n2 < 3)
+ if (c->n2 > 255)
return -EINVAL;
if (c->encapsulation > 1) /* Basic, advanced, no I */
return -EINVAL;
@@ -2370,19 +2391,11 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
/*
* Close down what is needed, restart and initiate the new
- * configuration
+ * configuration. On the first time there is no DLCI[0]
+ * and closing or cleaning up is not necessary.
*/
-
- if (gsm->initiator && (need_close || need_restart)) {
- int ret;
-
- ret = gsm_disconnect(gsm);
-
- if (ret)
- return ret;
- }
- if (need_restart)
- gsm_cleanup_mux(gsm);
+ if (need_close || need_restart)
+ gsm_cleanup_mux(gsm, true);
gsm->initiator = c->initiator;
gsm->mru = c->mru;
@@ -2450,25 +2463,26 @@ static int gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
int ret, i;
gsm->tty = tty_kref_get(tty);
+ /* Turn off tty XON/XOFF handling to handle it explicitly. */
+ gsm->old_c_iflag = tty->termios.c_iflag;
+ tty->termios.c_iflag &= (IXON | IXOFF);
ret = gsm_activate_mux(gsm);
if (ret != 0)
tty_kref_put(gsm->tty);
else {
/* Don't register device 0 - this is the control channel and not
a usable tty interface */
- if (gsm->initiator) {
- base = mux_num_to_base(gsm); /* Base for this MUX */
- for (i = 1; i < NUM_DLCI; i++) {
- struct device *dev;
+ base = mux_num_to_base(gsm); /* Base for this MUX */
+ for (i = 1; i < NUM_DLCI; i++) {
+ struct device *dev;
- dev = tty_register_device(gsm_tty_driver,
+ dev = tty_register_device(gsm_tty_driver,
base + i, NULL);
- if (IS_ERR(dev)) {
- for (i--; i >= 1; i--)
- tty_unregister_device(gsm_tty_driver,
- base + i);
- return PTR_ERR(dev);
- }
+ if (IS_ERR(dev)) {
+ for (i--; i >= 1; i--)
+ tty_unregister_device(gsm_tty_driver,
+ base + i);
+ return PTR_ERR(dev);
}
}
}
@@ -2490,11 +2504,10 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
int i;
WARN_ON(tty != gsm->tty);
- if (gsm->initiator) {
- for (i = 1; i < NUM_DLCI; i++)
- tty_unregister_device(gsm_tty_driver, base + i);
- }
- gsm_cleanup_mux(gsm);
+ for (i = 1; i < NUM_DLCI; i++)
+ tty_unregister_device(gsm_tty_driver, base + i);
+ /* Restore tty XON/XOFF handling. */
+ gsm->tty->termios.c_iflag = gsm->old_c_iflag;
tty_kref_put(gsm->tty);
gsm->tty = NULL;
}
@@ -2559,6 +2572,12 @@ static void gsmld_close(struct tty_struct *tty)
{
struct gsm_mux *gsm = tty->disc_data;
+ /* The ldisc locks and closes the port before calling our close. This
+ * means we have no way to do a proper disconnect. We will not bother
+ * to do one.
+ */
+ gsm_cleanup_mux(gsm, false);
+
gsmld_detach_gsm(tty, gsm);
gsmld_flush_buffer(tty);
@@ -2597,7 +2616,7 @@ static int gsmld_open(struct tty_struct *tty)
ret = gsmld_attach_gsm(tty, gsm);
if (ret != 0) {
- gsm_cleanup_mux(gsm);
+ gsm_cleanup_mux(gsm, false);
mux_put(gsm);
}
return ret;
@@ -2954,26 +2973,78 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
#define TX_SIZE 512
-static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
+/**
+ * gsm_modem_upd_via_data - send modem bits via convergence layer
+ * @dlci: channel
+ * @brk: break signal
+ *
+ * Send an empty frame to signal mobile state changes and to transmit the
+ * break signal for adaption 2.
+ */
+
+static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk)
{
- u8 modembits[5];
+ struct gsm_mux *gsm = dlci->gsm;
+ unsigned long flags;
+
+ if (dlci->state != DLCI_OPEN || dlci->adaption != 2)
+ return;
+
+ spin_lock_irqsave(&gsm->tx_lock, flags);
+ gsm_dlci_modem_output(gsm, dlci, brk);
+ spin_unlock_irqrestore(&gsm->tx_lock, flags);
+}
+
+/**
+ * gsm_modem_upd_via_msc - send modem bits via control frame
+ * @dlci: channel
+ * @brk: break signal
+ */
+
+static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk)
+{
+ u8 modembits[3];
struct gsm_control *ctrl;
int len = 2;
- if (brk)
- len++;
+ if (dlci->gsm->encoding != 0)
+ return 0;
- modembits[0] = len << 1 | EA; /* Data bytes */
- modembits[1] = dlci->addr << 2 | 3; /* DLCI, EA, 1 */
- modembits[2] = gsm_encode_modem(dlci) << 1 | EA;
- if (brk)
- modembits[3] = brk << 4 | 2 | EA; /* Valid, EA */
- ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len + 1);
+ modembits[0] = (dlci->addr << 2) | 2 | EA; /* DLCI, Valid, EA */
+ if (!brk) {
+ modembits[1] = (gsm_encode_modem(dlci) << 1) | EA;
+ } else {
+ modembits[1] = gsm_encode_modem(dlci) << 1;
+ modembits[2] = (brk << 4) | 2 | EA; /* Length, Break, EA */
+ len++;
+ }
+ ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len);
if (ctrl == NULL)
return -ENOMEM;
return gsm_control_wait(dlci->gsm, ctrl);
}
+/**
+ * gsm_modem_update - send modem status line state
+ * @dlci: channel
+ * @brk: break signal
+ */
+
+static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk)
+{
+ if (dlci->adaption == 2) {
+ /* Send convergence layer type 2 empty data frame. */
+ gsm_modem_upd_via_data(dlci, brk);
+ return 0;
+ } else if (dlci->gsm->encoding == 0) {
+ /* Send as MSC control message. */
+ return gsm_modem_upd_via_msc(dlci, brk);
+ }
+
+ /* Modem status lines are not supported. */
+ return -EPROTONOSUPPORT;
+}
+
static int gsm_carrier_raised(struct tty_port *port)
{
struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
@@ -3006,7 +3077,7 @@ static void gsm_dtr_rts(struct tty_port *port, int onoff)
modem_tx &= ~(TIOCM_DTR | TIOCM_RTS);
if (modem_tx != dlci->modem_tx) {
dlci->modem_tx = modem_tx;
- gsmtty_modem_update(dlci, 0);
+ gsm_modem_update(dlci, 0);
}
}
@@ -3155,13 +3226,17 @@ static unsigned int gsmtty_chars_in_buffer(struct tty_struct *tty)
static void gsmtty_flush_buffer(struct tty_struct *tty)
{
struct gsm_dlci *dlci = tty->driver_data;
+ unsigned long flags;
+
if (dlci->state == DLCI_CLOSED)
return;
/* Caution needed: If we implement reliable transport classes
then the data being transmitted can't simply be junked once
it has first hit the stack. Until then we can just blow it
away */
+ spin_lock_irqsave(&dlci->lock, flags);
kfifo_reset(&dlci->fifo);
+ spin_unlock_irqrestore(&dlci->lock, flags);
/* Need to unhook this DLCI from the transmit queue logic */
}
@@ -3193,7 +3268,7 @@ static int gsmtty_tiocmset(struct tty_struct *tty,
if (modem_tx != dlci->modem_tx) {
dlci->modem_tx = modem_tx;
- return gsmtty_modem_update(dlci, 0);
+ return gsm_modem_update(dlci, 0);
}
return 0;
}
@@ -3254,7 +3329,7 @@ static void gsmtty_throttle(struct tty_struct *tty)
dlci->modem_tx &= ~TIOCM_RTS;
dlci->throttled = true;
/* Send an MSC with RTS cleared */
- gsmtty_modem_update(dlci, 0);
+ gsm_modem_update(dlci, 0);
}
static void gsmtty_unthrottle(struct tty_struct *tty)
@@ -3266,7 +3341,7 @@ static void gsmtty_unthrottle(struct tty_struct *tty)
dlci->modem_tx |= TIOCM_RTS;
dlci->throttled = false;
/* Send an MSC with RTS set */
- gsmtty_modem_update(dlci, 0);
+ gsm_modem_update(dlci, 0);
}
static int gsmtty_break_ctl(struct tty_struct *tty, int state)
@@ -3284,7 +3359,7 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state)
if (encode > 0x0F)
encode = 0x0F; /* Best effort */
}
- return gsmtty_modem_update(dlci, encode);
+ return gsm_modem_update(dlci, encode);
}
static void gsmtty_cleanup(struct tty_struct *tty)
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index e17e97ea86fa..a293e9f107d0 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -2667,7 +2667,7 @@ enum pci_board_num_t {
pbn_panacom2,
pbn_panacom4,
pbn_plx_romulus,
- pbn_endrun_2_4000000,
+ pbn_endrun_2_3906250,
pbn_oxsemi,
pbn_oxsemi_1_3906250,
pbn_oxsemi_2_3906250,
@@ -3195,10 +3195,10 @@ static struct pciserial_board pci_boards[] = {
* signal how many ports are available
* 2 port 952 UART support
*/
- [pbn_endrun_2_4000000] = {
+ [pbn_endrun_2_3906250] = {
.flags = FL_BASE0,
.num_ports = 2,
- .base_baud = 4000000,
+ .base_baud = 3906250,
.uart_offset = 0x200,
.first_offset = 0x1000,
},
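The corrected value follows the 8250 convention that base_baud is the input clock divided by the 16x oversampling divisor: 3906250 corresponds to a 62.5 MHz clock (62500000 / 16 = 3906250), whereas the old 4000000 implied a 64 MHz clock. The oscillator frequency is inferred from the divisor arithmetic rather than stated in this hunk:

/* base_baud = uartclk / 16 for 8250-class UARTs */
unsigned int base_baud = 62500000 / 16;	/* == 3906250 */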
@@ -4115,7 +4115,7 @@ static const struct pci_device_id serial_pci_tbl[] = {
*/
{ PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_endrun_2_4000000 },
+ pbn_endrun_2_3906250 },
/*
* Quatech cards. These actually have configurable clocks but for
* now we just use the default.
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 318af6f13605..1fbd5bf264be 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1675,11 +1675,11 @@ static void serial8250_start_tx(struct uart_port *port)
struct uart_8250_port *up = up_to_u8250p(port);
struct uart_8250_em485 *em485 = up->em485;
- serial8250_rpm_get_tx(up);
-
if (!port->x_char && uart_circ_empty(&port->state->xmit))
return;
+ serial8250_rpm_get_tx(up);
+
if (em485 &&
em485->active_timer == &em485->start_tx_timer)
return;
@@ -3329,7 +3329,7 @@ static void serial8250_console_restore(struct uart_8250_port *up)
serial8250_set_divisor(port, baud, quot, frac);
serial_port_out(port, UART_LCR, up->lcr);
- serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
+ serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
}
/*
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 51ecb050ae40..4d11a3e547f9 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1255,13 +1255,18 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
static void pl011_rs485_tx_stop(struct uart_amba_port *uap)
{
+ /*
+ * To be on the safe side, only time out after twice as many iterations
+ * as the FIFO size.
+ */
+ const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2;
struct uart_port *port = &uap->port;
int i = 0;
u32 cr;
/* Wait until hardware tx queue is empty */
while (!pl011_tx_empty(port)) {
- if (i == port->fifosize) {
+ if (i > MAX_TX_DRAIN_ITERS) {
dev_warn(port->dev,
"timeout while draining hardware tx queue\n");
break;
@@ -2052,7 +2057,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
* with the given baud rate. We use this as the poll interval when we
* wait for the tx queue to empty.
*/
- uap->rs485_tx_drain_interval = (bits * 1000 * 1000) / baud;
+ uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud);
pl011_setup_status_masks(port, termios);
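Switching to DIV_ROUND_UP keeps the computed per-character time from being truncated, which would make the drain loop poll too often (and degenerate to a zero-length interval once the baud rate exceeds bits * 10^6). DIV_ROUND_UP is the standard kernel macro; a short worked example:

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* 10 bits per character at 4 Mbd:
 *   (10 * 1000 * 1000) / 4000000            == 2 (truncated from 2.5)
 *   DIV_ROUND_UP(10 * 1000 * 1000, 4000000) == 3 (never rounds down)
 */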
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index fd38e6ed4fda..a2100be8d554 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1448,7 +1448,7 @@ static int imx_uart_startup(struct uart_port *port)
imx_uart_writel(sport, ucr1, UCR1);
ucr4 = imx_uart_readl(sport, UCR4) & ~(UCR4_OREN | UCR4_INVR);
- if (!sport->dma_is_enabled)
+ if (!dma_is_inited)
ucr4 |= UCR4_OREN;
if (sport->inverted_rx)
ucr4 |= UCR4_INVR;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index e857fb61efbf..5fb201c1b563 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1238,12 +1238,10 @@ static void sc16is7xx_shutdown(struct uart_port *port)
/* Disable all interrupts */
sc16is7xx_port_write(port, SC16IS7XX_IER_REG, 0);
- /* Disable TX/RX, clear auto RS485 and RTS invert */
+ /* Disable TX/RX */
sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG,
SC16IS7XX_EFCR_RXDISABLE_BIT |
- SC16IS7XX_EFCR_TXDISABLE_BIT |
- SC16IS7XX_EFCR_AUTO_RS485_BIT |
- SC16IS7XX_EFCR_RTS_INVERT_BIT,
+ SC16IS7XX_EFCR_TXDISABLE_BIT,
SC16IS7XX_EFCR_RXDISABLE_BIT |
SC16IS7XX_EFCR_TXDISABLE_BIT);
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index f9af7ebe003d..d6d515d598dc 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -2684,6 +2684,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
struct usb_request *request;
struct cdns3_request *priv_req;
struct cdns3_trb *trb = NULL;
+ struct cdns3_trb trb_tmp;
int ret;
int val;
@@ -2693,8 +2694,10 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
if (request) {
priv_req = to_cdns3_request(request);
trb = priv_req->trb;
- if (trb)
+ if (trb) {
+ trb_tmp = *trb;
trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
+ }
}
writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
@@ -2709,7 +2712,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
if (request) {
if (trb)
- trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE);
+ *trb = trb_tmp;
cdns3_rearm_transfer(priv_ep, 1);
}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 6abb7294e919..b5b85bf80329 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1209,12 +1209,16 @@ static int do_proc_control(struct usb_dev_state *ps,
usb_unlock_device(dev);
i = usbfs_start_wait_urb(urb, tmo, &actlen);
+
+ /* Linger a bit, prior to the next control message. */
+ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
+ msleep(200);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, tbuf, actlen);
if (!i && actlen) {
if (copy_to_user(ctrl->data, tbuf, actlen)) {
ret = -EFAULT;
- goto recv_fault;
+ goto done;
}
}
} else {
@@ -1231,6 +1235,10 @@ static int do_proc_control(struct usb_dev_state *ps,
usb_unlock_device(dev);
i = usbfs_start_wait_urb(urb, tmo, &actlen);
+
+ /* Linger a bit, prior to the next control message. */
+ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
+ msleep(200);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, NULL, 0);
}
@@ -1242,10 +1250,6 @@ static int do_proc_control(struct usb_dev_state *ps,
}
ret = (i < 0 ? i : actlen);
- recv_fault:
- /* Linger a bit, prior to the next control message. */
- if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
- msleep(200);
done:
kfree(dr);
usb_free_urb(urb);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index d3c14b5ed4a1..97b44a68668a 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -404,6 +404,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ /* Realtek Semiconductor Corp. Mass Storage Device (Multicard Reader) */
+ { USB_DEVICE(0x0bda, 0x0151), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* Realtek hub in Dell WD19 (Type-C) */
{ USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
@@ -507,6 +510,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ /* VCOM device */
+ { USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 1170b800acdc..d28cd1a6709b 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -274,7 +274,8 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg |= DWC3_DCTL_CSFTRST;
- dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ reg &= ~DWC3_DCTL_RUN_STOP;
+ dwc3_gadget_dctl_write_safe(dwc, reg);
/*
* For DWC_usb31 controller 1.90a and later, the DCTL.CSFRST bit
@@ -1377,10 +1378,10 @@ static void dwc3_get_properties(struct dwc3 *dwc)
u8 lpm_nyet_threshold;
u8 tx_de_emphasis;
u8 hird_threshold;
- u8 rx_thr_num_pkt_prd;
- u8 rx_max_burst_prd;
- u8 tx_thr_num_pkt_prd;
- u8 tx_max_burst_prd;
+ u8 rx_thr_num_pkt_prd = 0;
+ u8 rx_max_burst_prd = 0;
+ u8 tx_thr_num_pkt_prd = 0;
+ u8 tx_max_burst_prd = 0;
u8 tx_fifo_resize_max_num;
const char *usb_psy_name;
int ret;
@@ -1690,21 +1691,44 @@ static int dwc3_probe(struct platform_device *pdev)
/*
* Clocks are optional, but new DT platforms should support all
* clocks as required by the DT-binding.
+ * Some devices have different clock names in legacy device trees,
+ * check for them to retain backwards compatibility.
*/
dwc->bus_clk = devm_clk_get_optional(dev, "bus_early");
if (IS_ERR(dwc->bus_clk))
return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
"could not get bus clock\n");
+ if (dwc->bus_clk == NULL) {
+ dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk");
+ if (IS_ERR(dwc->bus_clk))
+ return dev_err_probe(dev, PTR_ERR(dwc->bus_clk),
+ "could not get bus clock\n");
+ }
+
dwc->ref_clk = devm_clk_get_optional(dev, "ref");
if (IS_ERR(dwc->ref_clk))
return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
"could not get ref clock\n");
+ if (dwc->ref_clk == NULL) {
+ dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk");
+ if (IS_ERR(dwc->ref_clk))
+ return dev_err_probe(dev, PTR_ERR(dwc->ref_clk),
+ "could not get ref clock\n");
+ }
+
dwc->susp_clk = devm_clk_get_optional(dev, "suspend");
if (IS_ERR(dwc->susp_clk))
return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
"could not get suspend clock\n");
+
+ if (dwc->susp_clk == NULL) {
+ dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk");
+ if (IS_ERR(dwc->susp_clk))
+ return dev_err_probe(dev, PTR_ERR(dwc->susp_clk),
+ "could not get suspend clock\n");
+ }
}
ret = reset_control_deassert(dwc->reset);
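The probe change repeats one pattern three times: devm_clk_get_optional() returns NULL, not an error, when no clock exists under the given name, so a NULL result can be retried with the legacy device-tree name. A sketch of that pattern factored into a helper (hypothetical function, not in the patch):

#include <linux/clk.h>

static struct clk *clk_get_with_legacy_name(struct device *dev,
					    const char *name,
					    const char *legacy_name)
{
	struct clk *clk = devm_clk_get_optional(dev, name);

	/* NULL means "no such clock", not failure -- retry legacy name */
	if (!clk)
		clk = devm_clk_get_optional(dev, legacy_name);
	return clk;	/* NULL, a valid clock, or ERR_PTR() on error */
}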
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index b60b5f7b6dff..8cad9e7d3368 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -584,16 +584,15 @@ int dwc3_drd_init(struct dwc3 *dwc)
{
int ret, irq;
+ if (ROLE_SWITCH &&
+ device_property_read_bool(dwc->dev, "usb-role-switch"))
+ return dwc3_setup_role_switch(dwc);
+
dwc->edev = dwc3_get_extcon(dwc);
if (IS_ERR(dwc->edev))
return PTR_ERR(dwc->edev);
- if (ROLE_SWITCH &&
- device_property_read_bool(dwc->dev, "usb-role-switch")) {
- ret = dwc3_setup_role_switch(dwc);
- if (ret < 0)
- return ret;
- } else if (dwc->edev) {
+ if (dwc->edev) {
dwc->edev_nb.notifier_call = dwc3_drd_notifier;
ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST,
&dwc->edev_nb);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 33f657d83246..2e19e0e4ea53 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -45,6 +45,8 @@
#define PCI_DEVICE_ID_INTEL_ADLM 0x54ee
#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
+#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
+#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
#define PCI_DEVICE_ID_AMD_MR 0x163a
@@ -456,6 +458,12 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLP),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index ab725d2262d6..0b9c2493844a 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -3274,6 +3274,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
const struct dwc3_event_depevt *event,
struct dwc3_request *req, int status)
{
+ int request_status;
int ret;
if (req->request.num_mapped_sgs)
@@ -3294,7 +3295,35 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
req->needs_extra_trb = false;
}
- dwc3_gadget_giveback(dep, req, status);
+ /*
+ * The event status only reflects the status of the TRB with IOC set.
+ * For the requests that don't set interrupt on completion, the driver
+ * needs to check and return the status of the completed TRBs associated
+ * with the request. Use the status of the last TRB of the request.
+ */
+ if (req->request.no_interrupt) {
+ struct dwc3_trb *trb;
+
+ trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue);
+ switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) {
+ case DWC3_TRBSTS_MISSED_ISOC:
+ /* Isoc endpoint only */
+ request_status = -EXDEV;
+ break;
+ case DWC3_TRB_STS_XFER_IN_PROG:
+ /* Applicable when End Transfer with ForceRM=0 */
+ case DWC3_TRBSTS_SETUP_PENDING:
+ /* Control endpoint only */
+ case DWC3_TRBSTS_OK:
+ default:
+ request_status = 0;
+ break;
+ }
+ } else {
+ request_status = status;
+ }
+
+ dwc3_gadget_giveback(dep, req, request_status);
out:
return ret;
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 1fb837d9271e..84b73cb03f87 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1438,6 +1438,8 @@ static void configfs_composite_unbind(struct usb_gadget *gadget)
usb_ep_autoconfig_reset(cdev->gadget);
spin_lock_irqsave(&gi->spinlock, flags);
cdev->gadget = NULL;
+ cdev->deactivations = 0;
+ gadget->deactivated = false;
set_gadget_data(gadget, NULL);
spin_unlock_irqrestore(&gi->spinlock, flags);
}
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index d852ac9e47e7..2cda982f3765 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -264,6 +264,8 @@ void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect)
buf->state = UVC_BUF_STATE_ERROR;
vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
}
+ queue->buf_used = 0;
+
/* This must be protected by the irqlock spinlock to avoid race
* conditions between uvc_queue_buffer and the disconnection event that
* could result in an interruptible wait in uvc_dequeue_buffer. Do not
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 3d82e0b853be..684164fa9716 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1103,6 +1103,26 @@ static void ehci_remove_device(struct usb_hcd *hcd, struct usb_device *udev)
#ifdef CONFIG_PM
+/* Clear the wakeup signal latched on Zhaoxin platforms when a device is plugged in. */
+static void ehci_zx_wakeup_clear(struct ehci_hcd *ehci)
+{
+ u32 __iomem *reg = &ehci->regs->port_status[4];
+ u32 t1 = ehci_readl(ehci, reg);
+
+ t1 &= (u32)~0xf0000;
+ t1 |= PORT_TEST_FORCE;
+ ehci_writel(ehci, t1, reg);
+ t1 = ehci_readl(ehci, reg);
+ msleep(1);
+ t1 &= (u32)~0xf0000;
+ ehci_writel(ehci, t1, reg);
+ ehci_readl(ehci, reg);
+ msleep(1);
+ t1 = ehci_readl(ehci, reg);
+ ehci_writel(ehci, t1 | PORT_CSC, reg);
+ ehci_readl(ehci, reg);
+}
+
/* suspend/resume, section 4.3 */
/* These routines handle the generic parts of controller suspend/resume */
@@ -1154,6 +1174,9 @@ int ehci_resume(struct usb_hcd *hcd, bool force_reset)
if (ehci->shutdown)
return 0; /* Controller is dead */
+ if (ehci->zx_wakeup_clear_needed)
+ ehci_zx_wakeup_clear(ehci);
+
/*
* If CF is still set and reset isn't forced
* then we maintained suspend power.
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 638f03b89739..9937c5a7efc2 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -231,6 +231,10 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
ehci->is_aspeed = 1;
}
break;
+ case PCI_VENDOR_ID_ZHAOXIN:
+ if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x90)
+ ehci->zx_wakeup_clear_needed = 1;
+ break;
}
/* optional debug port, normally in the first BAR */
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index fdd073cc053b..ad3f13a3eaf1 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -220,6 +220,7 @@ struct ehci_hcd { /* one per controller */
unsigned imx28_write_fix:1; /* For Freescale i.MX28 */
unsigned spurious_oc:1;
unsigned is_aspeed:1;
+ unsigned zx_wakeup_clear_needed:1;
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 1e7dc130c39a..f65f1ba2b592 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1434,7 +1434,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
spin_unlock_irqrestore(&xhci->lock, flags);
if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex],
- msecs_to_jiffies(100)))
+ msecs_to_jiffies(500)))
xhci_dbg(xhci, "missing U0 port change event for port %d-%d\n",
hcd->self.busnum, wIndex + 1);
spin_lock_irqsave(&xhci->lock, flags);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5c351970cdf1..d7e0e6ebf080 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -59,6 +59,7 @@
#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
#define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
@@ -266,7 +267,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI))
+ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d0b6806275e0..f9707997969d 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3141,6 +3141,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
if (event_loop++ < TRBS_PER_SEGMENT / 2)
continue;
xhci_update_erst_dequeue(xhci, event_ring_deq);
+ event_ring_deq = xhci->event_ring->dequeue;
/* ring is half-full, force isoc trbs to interrupt more often */
if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index c8af2cd2216d..996958a6565c 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1034,13 +1034,13 @@ static int tegra_xusb_unpowergate_partitions(struct tegra_xusb *tegra)
int rc;
if (tegra->use_genpd) {
- rc = pm_runtime_get_sync(tegra->genpd_dev_ss);
+ rc = pm_runtime_resume_and_get(tegra->genpd_dev_ss);
if (rc < 0) {
dev_err(dev, "failed to enable XUSB SS partition\n");
return rc;
}
- rc = pm_runtime_get_sync(tegra->genpd_dev_host);
+ rc = pm_runtime_resume_and_get(tegra->genpd_dev_host);
if (rc < 0) {
dev_err(dev, "failed to enable XUSB Host partition\n");
pm_runtime_put_sync(tegra->genpd_dev_ss);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 642610c78f58..25b87e99b4dd 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -781,6 +781,17 @@ void xhci_shutdown(struct usb_hcd *hcd)
if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
+ /* Don't poll the roothubs after shutdown. */
+ xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
+ __func__, hcd->self.busnum);
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ del_timer_sync(&hcd->rh_timer);
+
+ if (xhci->shared_hcd) {
+ clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+ del_timer_sync(&xhci->shared_hcd->rh_timer);
+ }
+
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
/* Workaround for spurious wakeups at shutdown with HSW */
diff --git a/drivers/usb/misc/qcom_eud.c b/drivers/usb/misc/qcom_eud.c
index f929bffdc5d1..b7f13df00764 100644
--- a/drivers/usb/misc/qcom_eud.c
+++ b/drivers/usb/misc/qcom_eud.c
@@ -186,16 +186,16 @@ static int eud_probe(struct platform_device *pdev)
chip->dev = &pdev->dev;
- ret = devm_add_action_or_reset(chip->dev, eud_role_switch_release, chip);
- if (ret)
- return dev_err_probe(chip->dev, ret,
- "failed to add role switch release action\n");
-
chip->role_sw = usb_role_switch_get(&pdev->dev);
if (IS_ERR(chip->role_sw))
return dev_err_probe(chip->dev, PTR_ERR(chip->role_sw),
"failed to get role switch\n");
+ ret = devm_add_action_or_reset(chip->dev, eud_role_switch_release, chip);
+ if (ret)
+ return dev_err_probe(chip->dev, ret,
+ "failed to add role switch release action\n");
+
chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 748139d26263..0be8efcda15d 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -71,6 +71,7 @@ static void destroy_priv(struct kref *kref)
dev_dbg(&priv->usbdev->dev, "destroying priv datastructure\n");
usb_put_dev(priv->usbdev);
+ priv->usbdev = NULL;
kfree(priv);
}
@@ -736,7 +737,6 @@ static int uss720_probe(struct usb_interface *intf,
parport_announce_port(pp);
usb_set_intfdata(intf, pp);
- usb_put_dev(usbdev);
return 0;
probe_abort:
@@ -754,7 +754,6 @@ static void uss720_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (pp) {
priv = pp->private_data;
- priv->usbdev = NULL;
priv->pp = NULL;
dev_dbg(&intf->dev, "parport_remove_port\n");
parport_remove_port(pp);
diff --git a/drivers/usb/mtu3/mtu3_dr.c b/drivers/usb/mtu3/mtu3_dr.c
index a6b04831b20b..9b8aded3d95e 100644
--- a/drivers/usb/mtu3/mtu3_dr.c
+++ b/drivers/usb/mtu3/mtu3_dr.c
@@ -21,10 +21,8 @@ static inline struct ssusb_mtk *otg_sx_to_ssusb(struct otg_switch_mtk *otg_sx)
static void toggle_opstate(struct ssusb_mtk *ssusb)
{
- if (!ssusb->otg_switch.is_u3_drd) {
- mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION);
- mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN);
- }
+ mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION);
+ mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN);
}
/* only port0 supports dual-role mode */
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 661a229c105d..34b9f8140187 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -268,6 +268,13 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
return -EPROBE_DEFER;
}
+ nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus");
+ if (PTR_ERR(nop->vbus_draw) == -ENODEV)
+ nop->vbus_draw = NULL;
+ if (IS_ERR(nop->vbus_draw))
+ return dev_err_probe(dev, PTR_ERR(nop->vbus_draw),
+ "could not get vbus regulator\n");
+
nop->dev = dev;
nop->phy.dev = nop->dev;
nop->phy.label = "nop-xceiv";
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index a27f7efcec6a..c374620a486f 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -194,6 +194,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
{ USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */
{ USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */
+ { USB_DEVICE(0x17A8, 0x0101) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Int Ant) */
+ { USB_DEVICE(0x17A8, 0x0102) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Ext Ant) */
{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e7755d9cfc61..1364ce7f0abf 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -432,6 +432,8 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_CLS8 0x00b0
#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
+#define CINTERION_PRODUCT_MV32_WA 0x00f1
+#define CINTERION_PRODUCT_MV32_WB 0x00f2
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -1217,6 +1219,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */
.driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1057, 0xff), /* Telit FN980 */
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1058, 0xff), /* Telit FN980 (PCIe) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff), /* Telit LN920 (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff), /* Telit LN920 (MBIM) */
@@ -1233,6 +1239,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */
+ .driver_info = RSVD(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1969,6 +1977,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
.driver_info = RSVD(0)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
+ .driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
+ .driver_info = RSVD(3)},
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index da65d14c9ed5..06aad0d727dd 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -584,9 +584,8 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command,
switch (command) {
case WHITEHEAT_GET_DTR_RTS:
info = usb_get_serial_port_data(port);
- memcpy(&info->mcr, command_info->result_buffer,
- sizeof(struct whiteheat_dr_info));
- break;
+ info->mcr = command_info->result_buffer[0];
+ break;
}
}
exit:
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 8f921213b17d..ba24847fb245 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -56,6 +56,7 @@ config TYPEC_RT1719
tristate "Richtek RT1719 Sink Only Type-C controller driver"
depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
depends on I2C
+ depends on POWER_SUPPLY
select REGMAP_I2C
help
Say Y or M here if your system has Richtek RT1719 sink only
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index f0c2fa19f3e0..a6045aef0d04 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -949,6 +949,8 @@ static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role)
role == TYPEC_HOST))
goto out_unlock;
+ reinit_completion(&con->complete);
+
command = UCSI_SET_UOR | UCSI_CONNECTOR_NUMBER(con->num);
command |= UCSI_SET_UOR_ROLE(role);
command |= UCSI_SET_UOR_ACCEPT_ROLE_SWAPS;
@@ -956,14 +958,18 @@ static int ucsi_dr_swap(struct typec_port *port, enum typec_data_role role)
if (ret < 0)
goto out_unlock;
+ mutex_unlock(&con->lock);
+
if (!wait_for_completion_timeout(&con->complete,
- msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
- ret = -ETIMEDOUT;
+ msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
+ return -ETIMEDOUT;
+
+ return 0;
out_unlock:
mutex_unlock(&con->lock);
- return ret < 0 ? ret : 0;
+ return ret;
}
static int ucsi_pr_swap(struct typec_port *port, enum typec_role role)
@@ -985,6 +991,8 @@ static int ucsi_pr_swap(struct typec_port *port, enum typec_role role)
if (cur_role == role)
goto out_unlock;
+ reinit_completion(&con->complete);
+
command = UCSI_SET_PDR | UCSI_CONNECTOR_NUMBER(con->num);
command |= UCSI_SET_PDR_ROLE(role);
command |= UCSI_SET_PDR_ACCEPT_ROLE_SWAPS;
@@ -992,11 +1000,13 @@ static int ucsi_pr_swap(struct typec_port *port, enum typec_role role)
if (ret < 0)
goto out_unlock;
+ mutex_unlock(&con->lock);
+
if (!wait_for_completion_timeout(&con->complete,
- msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) {
- ret = -ETIMEDOUT;
- goto out_unlock;
- }
+ msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS)))
+ return -ETIMEDOUT;
+
+ mutex_lock(&con->lock);
/* Something has gone wrong while swapping the role */
if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) !=
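Both swap paths now follow the same shape: re-arm the completion before issuing the command, then drop the connector lock while waiting so the notification handler that completes the swap can take the lock itself. A simplified sketch of that ordering (issue_command() is hypothetical, condensed from the two hunks):

	mutex_lock(&con->lock);
	reinit_completion(&con->complete);
	ret = issue_command(con);		/* hypothetical */
	if (ret < 0) {
		mutex_unlock(&con->lock);
		return ret;
	}
	mutex_unlock(&con->lock);	/* completer may need the lock */
	if (!wait_for_completion_timeout(&con->complete, timeout))
		return -ETIMEDOUT;
	return 0;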
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index b7bb16f92ac6..06b6f3594a13 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -36,6 +36,10 @@ static bool nointxmask;
static bool disable_vga;
static bool disable_idle_d3;
+/* List of PFs that vfio_pci_core_sriov_configure() has been called on */
+static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex);
+static LIST_HEAD(vfio_pci_sriov_pfs);
+
static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
@@ -434,47 +438,17 @@ out:
}
EXPORT_SYMBOL_GPL(vfio_pci_core_disable);
-static struct vfio_pci_core_device *get_pf_vdev(struct vfio_pci_core_device *vdev)
-{
- struct pci_dev *physfn = pci_physfn(vdev->pdev);
- struct vfio_device *pf_dev;
-
- if (!vdev->pdev->is_virtfn)
- return NULL;
-
- pf_dev = vfio_device_get_from_dev(&physfn->dev);
- if (!pf_dev)
- return NULL;
-
- if (pci_dev_driver(physfn) != pci_dev_driver(vdev->pdev)) {
- vfio_device_put(pf_dev);
- return NULL;
- }
-
- return container_of(pf_dev, struct vfio_pci_core_device, vdev);
-}
-
-static void vfio_pci_vf_token_user_add(struct vfio_pci_core_device *vdev, int val)
-{
- struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
-
- if (!pf_vdev)
- return;
-
- mutex_lock(&pf_vdev->vf_token->lock);
- pf_vdev->vf_token->users += val;
- WARN_ON(pf_vdev->vf_token->users < 0);
- mutex_unlock(&pf_vdev->vf_token->lock);
-
- vfio_device_put(&pf_vdev->vdev);
-}
-
void vfio_pci_core_close_device(struct vfio_device *core_vdev)
{
struct vfio_pci_core_device *vdev =
container_of(core_vdev, struct vfio_pci_core_device, vdev);
- vfio_pci_vf_token_user_add(vdev, -1);
+ if (vdev->sriov_pf_core_dev) {
+ mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
+ WARN_ON(!vdev->sriov_pf_core_dev->vf_token->users);
+ vdev->sriov_pf_core_dev->vf_token->users--;
+ mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
+ }
vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_core_disable(vdev);
@@ -495,7 +469,12 @@ void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev)
{
vfio_pci_probe_mmaps(vdev);
vfio_spapr_pci_eeh_open(vdev->pdev);
- vfio_pci_vf_token_user_add(vdev, 1);
+
+ if (vdev->sriov_pf_core_dev) {
+ mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
+ vdev->sriov_pf_core_dev->vf_token->users++;
+ mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
+ }
}
EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);
@@ -1583,11 +1562,8 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
*
* If the VF token is provided but unused, an error is generated.
*/
- if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token)
- return 0; /* No VF token provided or required */
-
if (vdev->pdev->is_virtfn) {
- struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
+ struct vfio_pci_core_device *pf_vdev = vdev->sriov_pf_core_dev;
bool match;
if (!pf_vdev) {
@@ -1600,7 +1576,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
}
if (!vf_token) {
- vfio_device_put(&pf_vdev->vdev);
pci_info_ratelimited(vdev->pdev,
"VF token required to access device\n");
return -EACCES;
@@ -1610,8 +1585,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
mutex_unlock(&pf_vdev->vf_token->lock);
- vfio_device_put(&pf_vdev->vdev);
-
if (!match) {
pci_info_ratelimited(vdev->pdev,
"Incorrect VF token provided for device\n");
@@ -1732,8 +1705,30 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
+ struct vfio_pci_core_device *cur;
+ struct pci_dev *physfn;
int ret;
+ if (pdev->is_virtfn) {
+ /*
+ * If this VF was created by our vfio_pci_core_sriov_configure()
+ * then we can find the PF vfio_pci_core_device now, and due to
+ * the locking in pci_disable_sriov() it cannot change until
+ * this VF device driver is removed.
+ */
+ physfn = pci_physfn(vdev->pdev);
+ mutex_lock(&vfio_pci_sriov_pfs_mutex);
+ list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) {
+ if (cur->pdev == physfn) {
+ vdev->sriov_pf_core_dev = cur;
+ break;
+ }
+ }
+ mutex_unlock(&vfio_pci_sriov_pfs_mutex);
+ return 0;
+ }
+
+ /* Not a SRIOV PF */
if (!pdev->is_physfn)
return 0;
@@ -1805,6 +1800,7 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
INIT_LIST_HEAD(&vdev->ioeventfds_list);
mutex_init(&vdev->vma_lock);
INIT_LIST_HEAD(&vdev->vma_list);
+ INIT_LIST_HEAD(&vdev->sriov_pfs_item);
init_rwsem(&vdev->memory_lock);
}
EXPORT_SYMBOL_GPL(vfio_pci_core_init_device);
@@ -1896,7 +1892,7 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
- pci_disable_sriov(pdev);
+ vfio_pci_core_sriov_configure(pdev, 0);
vfio_unregister_group_dev(&vdev->vdev);
@@ -1935,21 +1931,49 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);
int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
{
+ struct vfio_pci_core_device *vdev;
struct vfio_device *device;
int ret = 0;
+ device_lock_assert(&pdev->dev);
+
device = vfio_device_get_from_dev(&pdev->dev);
if (!device)
return -ENODEV;
- if (nr_virtfn == 0)
- pci_disable_sriov(pdev);
- else
+ vdev = container_of(device, struct vfio_pci_core_device, vdev);
+
+ if (nr_virtfn) {
+ mutex_lock(&vfio_pci_sriov_pfs_mutex);
+ /*
+ * The thread that adds the vdev to the list is the only thread
+ * that gets to call pci_enable_sriov(), and we will only allow
+ * it to be called once without going through
+ * pci_disable_sriov().
+ */
+ if (!list_empty(&vdev->sriov_pfs_item)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs);
+ mutex_unlock(&vfio_pci_sriov_pfs_mutex);
ret = pci_enable_sriov(pdev, nr_virtfn);
+ if (ret)
+ goto out_del;
+ ret = nr_virtfn;
+ goto out_put;
+ }
- vfio_device_put(device);
+ pci_disable_sriov(pdev);
- return ret < 0 ? ret : nr_virtfn;
+out_del:
+ mutex_lock(&vfio_pci_sriov_pfs_mutex);
+ list_del_init(&vdev->sriov_pfs_item);
+out_unlock:
+ mutex_unlock(&vfio_pci_sriov_pfs_mutex);
+out_put:
+ vfio_device_put(device);
+ return ret;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
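For context, the vfio_pci_sriov_pfs list used above is an instance of a common kernel idiom: a driver-private global list protected by a mutex, where an INIT_LIST_HEAD()ed node combined with list_empty()/list_del_init() doubles as an "already enlisted" flag. A minimal sketch of that idiom follows; the names are hypothetical, and this is not the actual vfio code:

#include <linux/list.h>
#include <linux/mutex.h>

static LIST_HEAD(my_pf_list);
static DEFINE_MUTEX(my_pf_list_mutex);

struct my_dev {
	struct list_head pf_item;	/* INIT_LIST_HEAD() this at init time */
};

static int my_dev_enlist(struct my_dev *d)
{
	int ret = 0;

	mutex_lock(&my_pf_list_mutex);
	/* list_empty() on an initialized, unlinked node means "not enlisted" */
	if (!list_empty(&d->pf_item))
		ret = -EINVAL;
	else
		list_add_tail(&d->pf_item, &my_pf_list);
	mutex_unlock(&my_pf_list_mutex);
	return ret;
}

static void my_dev_delist(struct my_dev *d)
{
	mutex_lock(&my_pf_list_mutex);
	/* list_del_init() re-initializes the node so list_empty() works again */
	list_del_init(&d->pf_item);
	mutex_unlock(&my_pf_list_mutex);
}

This is also why vfio_pci_core_init_device() gains the INIT_LIST_HEAD(&vdev->sriov_pfs_item) call and why the error path uses list_del_init() rather than plain list_del().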
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index edf169d0816e..eb3e47c58c5f 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -566,6 +566,9 @@ static int arkfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
int rv, mem, step;
+ if (!var->pixclock)
+ return -EINVAL;
+
/* Find appropriate format */
rv = svga_match_format (arkfb_formats, var, NULL);
if (rv < 0)
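The !var->pixclock check added here (and, below, to i740fb, s3fb, tridentfb and vt8623fb) guards against a division by zero: var comes from userspace via FBIOPUT_VSCREENINFO, and later mode-setting math divides by the pixel clock period, e.g. through the PICOS2KHZ() macro from <linux/fb.h>. A minimal sketch of the failure mode being closed (the RAMDAC limit is a hypothetical value):

#include <linux/fb.h>	/* PICOS2KHZ(a): 1000000000UL / (a) */

static int example_check_var(struct fb_var_screeninfo *var,
			     struct fb_info *info)
{
	/* var is user-controlled; pixclock == 0 must be rejected early */
	if (!var->pixclock)
		return -EINVAL;	/* PICOS2KHZ(0) would divide by zero */

	if (PICOS2KHZ(var->pixclock) > 230000)	/* hypothetical limit, kHz */
		return -EINVAL;
	return 0;
}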
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index 6ff16d3132e5..b26c81233b6b 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -68,7 +68,6 @@
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-#include <asm/prom.h>
#include "../macmodes.h"
#endif
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 1aef3d6ebd88..a3e6faed7745 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -79,7 +79,6 @@
#ifdef __powerpc__
#include <asm/machdep.h>
-#include <asm/prom.h>
#include "../macmodes.h"
#endif
#ifdef __sparc__
diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c
index b5fbd5329652..97a5972f5b1f 100644
--- a/drivers/video/fbdev/aty/radeon_pm.c
+++ b/drivers/video/fbdev/aty/radeon_pm.c
@@ -22,7 +22,6 @@
#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/pmac_feature.h>
#endif
diff --git a/drivers/video/fbdev/aty/radeonfb.h b/drivers/video/fbdev/aty/radeonfb.h
index 93f403cbb415..91d81b576231 100644
--- a/drivers/video/fbdev/aty/radeonfb.h
+++ b/drivers/video/fbdev/aty/radeonfb.h
@@ -21,7 +21,7 @@
#include <asm/io.h>
-#if defined(CONFIG_PPC) || defined(CONFIG_SPARC)
+#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index c5d15c6db287..771ce1f76951 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -268,8 +268,7 @@ static int clps711x_fb_probe(struct platform_device *pdev)
goto out_fb_release;
}
- cfb->syscon =
- syscon_regmap_lookup_by_compatible("cirrus,ep7209-syscon1");
+ cfb->syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
if (IS_ERR(cfb->syscon)) {
ret = PTR_ERR(cfb->syscon);
goto out_fb_release;
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index bd59e7b11ed5..aba46118b208 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -47,9 +47,6 @@
#include <linux/nvram.h>
#include <linux/adb.h>
#include <linux/cuda.h>
-#ifdef CONFIG_PPC_PMAC
-#include <asm/prom.h>
-#endif
#ifdef CONFIG_BOOTX_TEXT
#include <asm/btext.h>
#endif
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index 52cce0db8bd3..09dd85553d4f 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -657,6 +657,9 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
static int i740fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
+ if (!var->pixclock)
+ return -EINVAL;
+
switch (var->bits_per_pixel) {
case 8:
var->red.offset = var->green.offset = var->blue.offset = 0;
@@ -740,7 +743,7 @@ static int i740fb_set_par(struct fb_info *info)
if (i)
return i;
- memset(info->screen_base, 0, info->screen_size);
+ memset_io(info->screen_base, 0, info->screen_size);
vga_protect(par);
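The memset() to memset_io() switch above matters because info->screen_base points at ioremap()ed framebuffer memory, i.e. an __iomem region: ordinary string operations are not guaranteed to work on I/O memory on all architectures, while memset_io() goes through the proper accessors. A short sketch of the pattern, with hypothetical names:

#include <linux/io.h>
#include <linux/types.h>

static void __iomem *fb_base;

static int example_map_and_clear(phys_addr_t phys, size_t len)
{
	fb_base = ioremap(phys, len);
	if (!fb_base)
		return -ENOMEM;

	/* __iomem memory must be cleared through the I/O accessors */
	memset_io(fb_base, 0, len);
	return 0;
}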
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index 68288756ffff..a2f644c97f28 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -925,10 +925,12 @@ static int imxfb_probe(struct platform_device *pdev)
sizeof(struct imx_fb_videomode), GFP_KERNEL);
if (!fbi->mode) {
ret = -ENOMEM;
+ of_node_put(display_np);
goto failed_of_parse;
}
ret = imxfb_of_read_mode(&pdev->dev, display_np, fbi->mode);
+ of_node_put(display_np);
if (ret)
goto failed_of_parse;
}
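The two of_node_put() calls added to imxfb fix a device-tree node refcount leak: lookup helpers such as of_parse_phandle() return the node with its refcount raised, so every exit path, including error paths, must drop that reference exactly once. A minimal sketch of the required pairing (the property name is hypothetical):

#include <linux/of.h>

static int example_read_display(struct device_node *np)
{
	struct device_node *display_np;
	int ret = 0;

	display_np = of_parse_phandle(np, "display", 0);	/* takes a ref */
	if (!display_np)
		return -EINVAL;

	/* ... parse properties of display_np here, possibly setting ret ... */

	of_node_put(display_np);	/* drop the ref on every path */
	return ret;
}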
diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
index 25801e8e3f74..d57772f96ad2 100644
--- a/drivers/video/fbdev/kyro/fbdev.c
+++ b/drivers/video/fbdev/kyro/fbdev.c
@@ -494,6 +494,8 @@ static int kyrofb_set_par(struct fb_info *info)
info->var.hsync_len +
info->var.left_margin)) / 1000;
+ if (!lineclock)
+ return -EINVAL;
/* time for a frame in ns (precision in 32bpp) */
frameclock = lineclock * (info->var.yres +
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.h b/drivers/video/fbdev/matrox/matroxfb_base.h
index 759dee996af1..958be6805f87 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.h
+++ b/drivers/video/fbdev/matrox/matroxfb_base.h
@@ -47,7 +47,6 @@
#include <asm/unaligned.h>
#if defined(CONFIG_PPC_PMAC)
-#include <asm/prom.h>
#include "../macmodes.h"
#endif
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 63721337a377..a7508f5be343 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -18,6 +18,8 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#if defined(CONFIG_OF)
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#endif
#include "mb862xxfb.h"
diff --git a/drivers/video/fbdev/mmp/core.c b/drivers/video/fbdev/mmp/core.c
index 154127256a2c..03707461eced 100644
--- a/drivers/video/fbdev/mmp/core.c
+++ b/drivers/video/fbdev/mmp/core.c
@@ -127,19 +127,18 @@ EXPORT_SYMBOL_GPL(mmp_unregister_panel);
*/
struct mmp_path *mmp_get_path(const char *name)
{
- struct mmp_path *path;
- int found = 0;
+ struct mmp_path *path = NULL, *iter;
mutex_lock(&disp_lock);
- list_for_each_entry(path, &path_list, node) {
- if (!strcmp(name, path->name)) {
- found = 1;
+ list_for_each_entry(iter, &path_list, node) {
+ if (!strcmp(name, iter->name)) {
+ path = iter;
break;
}
}
mutex_unlock(&disp_lock);
- return found ? path : NULL;
+ return path;
}
EXPORT_SYMBOL_GPL(mmp_get_path);
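The mmp_get_path() rewrite is in line with the broader kernel cleanup that stops using the list_for_each_entry() iterator variable after the loop: if the loop terminates without a break, the iterator points at a bogus container computed from the list head, so the safe idiom is a separate result pointer that stays NULL unless a match is found. Sketch:

#include <linux/list.h>
#include <linux/string.h>

struct item {
	const char *name;
	struct list_head node;
};

static LIST_HEAD(items);

static struct item *find_item(const char *name)
{
	struct item *found = NULL, *iter;

	list_for_each_entry(iter, &items, node) {
		if (!strcmp(name, iter->name)) {
			found = iter;
			break;
		}
	}
	/* 'iter' must not be touched here; 'found' is NULL or a real match */
	return found;
}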
diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c
index 966df2a07360..28d32cbf496b 100644
--- a/drivers/video/fbdev/neofb.c
+++ b/drivers/video/fbdev/neofb.c
@@ -585,7 +585,7 @@ neofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
DBG("neofb_check_var");
- if (var->pixclock && PICOS2KHZ(var->pixclock) > par->maxClock)
+ if (!var->pixclock || PICOS2KHZ(var->pixclock) > par->maxClock)
return -EINVAL;
/* Is the mode larger than the LCD panel? */
diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
index b191bef22d98..9d9fe5c3a7a1 100644
--- a/drivers/video/fbdev/omap/hwa742.c
+++ b/drivers/video/fbdev/omap/hwa742.c
@@ -964,7 +964,7 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
if ((r = calc_extif_timings(ext_clk, &extif_mem_div)) < 0)
goto err3;
hwa742.extif->set_timings(&hwa742.reg_timings);
- clk_enable(hwa742.sys_ck);
+ clk_prepare_enable(hwa742.sys_ck);
calc_hwa742_clk_rates(ext_clk, &sys_clk, &pix_clk);
if ((r = calc_extif_timings(sys_clk, &extif_mem_div)) < 0)
@@ -1023,7 +1023,7 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
return 0;
err4:
- clk_disable(hwa742.sys_ck);
+ clk_disable_unprepare(hwa742.sys_ck);
err3:
hwa742.extif->cleanup();
err2:
@@ -1037,7 +1037,7 @@ static void hwa742_cleanup(void)
hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
hwa742.extif->cleanup();
hwa742.int_ctrl->cleanup();
- clk_disable(hwa742.sys_ck);
+ clk_disable_unprepare(hwa742.sys_ck);
}
struct lcd_ctrl hwa742_ctrl = {
diff --git a/drivers/video/fbdev/omap/lcdc.c b/drivers/video/fbdev/omap/lcdc.c
index 7317c9aad677..97d20dc0d1d0 100644
--- a/drivers/video/fbdev/omap/lcdc.c
+++ b/drivers/video/fbdev/omap/lcdc.c
@@ -711,7 +711,7 @@ static int omap_lcdc_init(struct omapfb_device *fbdev, int ext_mode,
dev_err(fbdev->dev, "failed to adjust LCD rate\n");
goto fail1;
}
- clk_enable(lcdc.lcd_ck);
+ clk_prepare_enable(lcdc.lcd_ck);
r = request_irq(OMAP_LCDC_IRQ, lcdc_irq_handler, 0, MODULE_NAME, fbdev);
if (r) {
@@ -746,7 +746,7 @@ fail4:
fail3:
free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
fail2:
- clk_disable(lcdc.lcd_ck);
+ clk_disable_unprepare(lcdc.lcd_ck);
fail1:
clk_put(lcdc.lcd_ck);
fail0:
@@ -760,7 +760,7 @@ static void omap_lcdc_cleanup(void)
free_fbmem();
omap_free_lcd_dma();
free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
- clk_disable(lcdc.lcd_ck);
+ clk_disable_unprepare(lcdc.lcd_ck);
clk_put(lcdc.lcd_ck);
}
diff --git a/drivers/video/fbdev/omap/sossi.c b/drivers/video/fbdev/omap/sossi.c
index 80ac67f27f0d..b9cb8b386627 100644
--- a/drivers/video/fbdev/omap/sossi.c
+++ b/drivers/video/fbdev/omap/sossi.c
@@ -598,7 +598,7 @@ static int sossi_init(struct omapfb_device *fbdev)
l &= ~CONF_SOSSI_RESET_R;
omap_writel(l, MOD_CONF_CTRL_1);
- clk_enable(sossi.fck);
+ clk_prepare_enable(sossi.fck);
l = omap_readl(ARM_IDLECT2);
l &= ~(1 << 8); /* DMACK_REQ */
omap_writel(l, ARM_IDLECT2);
@@ -649,7 +649,7 @@ static int sossi_init(struct omapfb_device *fbdev)
return 0;
err:
- clk_disable(sossi.fck);
+ clk_disable_unprepare(sossi.fck);
clk_put(sossi.fck);
return r;
}
@@ -657,6 +657,7 @@ err:
static void sossi_cleanup(void)
{
omap_lcdc_free_dma_callback();
+ clk_unprepare(sossi.fck);
clk_put(sossi.fck);
iounmap(sossi.base);
}
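The clk_enable() to clk_prepare_enable() conversions in these OMAP drivers follow the common clock framework contract: a clock must be prepared (a step that may sleep) before it can be enabled (an atomic step), and clk_prepare_enable()/clk_disable_unprepare() bundle the two for callers running in process context. A sketch of the expected pairing; the clock name is hypothetical:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_clk_setup(struct device *dev)
{
	struct clk *ck;
	int ret;

	ck = clk_get(dev, "sys_ck");	/* hypothetical clock name */
	if (IS_ERR(ck))
		return PTR_ERR(ck);

	ret = clk_prepare_enable(ck);	/* prepare (may sleep) + enable */
	if (ret) {
		clk_put(ck);
		return ret;
	}

	/* ... use the clocked hardware ... */

	clk_disable_unprepare(ck);	/* exact mirror of clk_prepare_enable() */
	clk_put(ck);
	return 0;
}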
diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index ce413a9df06e..5b9e26ea6449 100644
--- a/drivers/video/fbdev/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
@@ -30,9 +30,9 @@
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/nvram.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
-#include <asm/prom.h>
#include "macmodes.h"
#include "platinumfb.h"
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index c68725eebee3..d3be2c64f1c0 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -1504,9 +1504,7 @@ static const struct fb_ops pm2fb_ops = {
/**
- * Device initialisation
- *
- * Initialise and allocate resource for PCI device.
+ * pm2fb_probe - Initialise and allocate resource for PCI device.
*
* @pdev: PCI device.
* @id: PCI device ID.
@@ -1711,9 +1709,7 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/**
- * Device removal.
- *
- * Release all device resources.
+ * pm2fb_remove - Release all device resources.
*
* @pdev: PCI device to clean up.
*/
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index f1551e00eb12..8ad91c251fe6 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -2256,10 +2256,10 @@ static int pxafb_probe(struct platform_device *dev)
goto failed;
for (i = 0; i < inf->num_modes; i++)
inf->modes[i] = pdata->modes[i];
+ } else {
+ inf = of_pxafb_of_mach_info(&dev->dev);
}
- if (!pdata)
- inf = of_pxafb_of_mach_info(&dev->dev);
if (IS_ERR_OR_NULL(inf))
goto failed;
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index 5c74253e7b2c..b93c8eb02336 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -549,6 +549,9 @@ static int s3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
int rv, mem, step;
u16 m, n, r;
+ if (!var->pixclock)
+ return -EINVAL;
+
/* Find appropriate format */
rv = svga_match_format (s3fb_formats, var, NULL);
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index aa4ebe3192ec..9a4417430b4e 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -531,9 +531,6 @@ static void sh_mobile_lcdc_display_off(struct sh_mobile_lcdc_chan *ch)
ch->tx_dev->ops->display_off(ch->tx_dev);
}
-static int sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info);
-
/* -----------------------------------------------------------------------------
* Format helpers
*/
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 742f62986b80..f28fd69d5eb7 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -4463,7 +4463,7 @@ static void sisfb_post_sis300(struct pci_dev *pdev)
SiS_SetReg(SISCR, 0x37, 0x02);
SiS_SetReg(SISPART2, 0x00, 0x1c);
v4 = 0x00; v5 = 0x00; v6 = 0x10;
- if(ivideo->SiS_Pr.UseROM) {
+ if (ivideo->SiS_Pr.UseROM && bios) {
v4 = bios[0xf5];
v5 = bios[0xf6];
v6 = bios[0xf7];
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 4d20cb557ff0..319131bd72cf 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -996,6 +996,9 @@ static int tridentfb_check_var(struct fb_var_screeninfo *var,
int ramdac = 230000; /* 230MHz for most 3D chips */
debug("enter\n");
+ if (!var->pixclock)
+ return -EINVAL;
+
/* check color depth */
if (bpp == 24)
bpp = var->bits_per_pixel = 32;
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index b6ec0b8e2b72..d280733f283b 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -1650,8 +1650,9 @@ static int dlfb_usb_probe(struct usb_interface *intf,
const struct device_attribute *attr;
struct dlfb_data *dlfb;
struct fb_info *info;
- int retval = -ENOMEM;
+ int retval;
struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *out;
/* usb initialization */
dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL);
@@ -1665,6 +1666,12 @@ static int dlfb_usb_probe(struct usb_interface *intf,
dlfb->udev = usb_get_dev(usbdev);
usb_set_intfdata(intf, dlfb);
+ retval = usb_find_common_endpoints(intf->cur_altsetting, NULL, &out, NULL, NULL);
+ if (retval) {
+ dev_err(&intf->dev, "Device should have at least 1 bulk endpoint!\n");
+ goto error;
+ }
+
dev_dbg(&intf->dev, "console enable=%d\n", console);
dev_dbg(&intf->dev, "fb_defio enable=%d\n", fb_defio);
dev_dbg(&intf->dev, "shadow enable=%d\n", shadow);
@@ -1674,6 +1681,7 @@ static int dlfb_usb_probe(struct usb_interface *intf,
if (!dlfb_parse_vendor_descriptor(dlfb, intf)) {
dev_err(&intf->dev,
"firmware not recognized, incompatible device?\n");
+ retval = -ENODEV;
goto error;
}
@@ -1687,8 +1695,10 @@ static int dlfb_usb_probe(struct usb_interface *intf,
/* allocates framebuffer driver structure, not framebuffer memory */
info = framebuffer_alloc(0, &dlfb->udev->dev);
- if (!info)
+ if (!info) {
+ retval = -ENOMEM;
goto error;
+ }
dlfb->info = info;
info->par = dlfb;
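The probe-time endpoint check added to udlfb closes a class of bugs where a malformed or malicious USB device lacks the bulk-out endpoint the driver later assumes exists. usb_find_common_endpoints() scans the given altsetting and fills in only the descriptor pointers the caller asks for, with NULL passed for the rest. A small sketch:

#include <linux/usb.h>

static int example_probe(struct usb_interface *intf)
{
	struct usb_endpoint_descriptor *bulk_out;
	int ret;

	/* argument order: altsetting, bulk_in, bulk_out, int_in, int_out */
	ret = usb_find_common_endpoints(intf->cur_altsetting,
					NULL, &bulk_out, NULL, NULL);
	if (ret)
		return ret;	/* no bulk-out endpoint on this interface */

	/* bulk_out->bEndpointAddress is safe to use from here on */
	return 0;
}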
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index 8425afe37d7c..a6c9d4f26669 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -54,10 +54,9 @@
#include <linux/nvram.h>
#include <linux/adb.h>
#include <linux/cuda.h>
+#include <linux/of_address.h>
#ifdef CONFIG_MAC
#include <asm/macintosh.h>
-#else
-#include <asm/prom.h>
#endif
#include "macmodes.h"
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index 7a959e5ba90b..a92a8c670cf0 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -321,6 +321,9 @@ static int vt8623fb_check_var(struct fb_var_screeninfo *var, struct fb_info *inf
{
int rv, mem, step;
+ if (!var->pixclock)
+ return -EINVAL;
+
/* Find appropriate format */
rv = svga_match_format (vt8623fb_formats, var, NULL);
if (rv < 0)
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index f93b6abbe258..bebd371c6b93 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -199,7 +199,7 @@ struct display_timings *of_get_display_timings(const struct device_node *np)
struct display_timing *dt;
int r;
- dt = kzalloc(sizeof(*dt), GFP_KERNEL);
+ dt = kmalloc(sizeof(*dt), GFP_KERNEL);
if (!dt) {
pr_err("%pOF: could not allocate display_timing struct\n",
np);
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index dfe26fa17e95..617a7f4f07a8 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -689,29 +689,34 @@ void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
}
EXPORT_SYMBOL(xen_free_ballooned_pages);
-#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
-static void __init balloon_add_region(unsigned long start_pfn,
- unsigned long pages)
+static void __init balloon_add_regions(void)
{
+#if defined(CONFIG_XEN_PV)
+ unsigned long start_pfn, pages;
unsigned long pfn, extra_pfn_end;
+ unsigned int i;
- /*
- * If the amount of usable memory has been limited (e.g., with
- * the 'mem' command line parameter), don't add pages beyond
- * this limit.
- */
- extra_pfn_end = min(max_pfn, start_pfn + pages);
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ pages = xen_extra_mem[i].n_pfns;
+ if (!pages)
+ continue;
- for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
- /* totalram_pages and totalhigh_pages do not
- include the boot-time balloon extension, so
- don't subtract from it. */
- balloon_append(pfn_to_page(pfn));
- }
+ start_pfn = xen_extra_mem[i].start_pfn;
- balloon_stats.total_pages += extra_pfn_end - start_pfn;
-}
+ /*
+ * If the amount of usable memory has been limited (e.g., with
+ * the 'mem' command line parameter), don't add pages beyond
+ * this limit.
+ */
+ extra_pfn_end = min(max_pfn, start_pfn + pages);
+
+ for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
+ balloon_append(pfn_to_page(pfn));
+
+ balloon_stats.total_pages += extra_pfn_end - start_pfn;
+ }
#endif
+}
static int __init balloon_init(void)
{
@@ -745,20 +750,7 @@ static int __init balloon_init(void)
register_sysctl_table(xen_root);
#endif
-#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
- {
- int i;
-
- /*
- * Initialize the balloon with pages from the extra memory
- * regions (see arch/x86/xen/setup.c).
- */
- for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
- if (xen_extra_mem[i].n_pfns)
- balloon_add_region(xen_extra_mem[i].start_pfn,
- xen_extra_mem[i].n_pfns);
- }
-#endif
+ balloon_add_regions();
task = kthread_run(balloon_thread, NULL, "xen-balloon");
if (IS_ERR(task)) {
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 4849f94372a4..55acb32842a3 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -178,9 +178,9 @@ static void __del_gref(struct gntalloc_gref *gref)
unsigned long addr;
if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
- uint8_t *tmp = kmap(gref->page);
+ uint8_t *tmp = kmap_local_page(gref->page);
tmp[gref->notify.pgoff] = 0;
- kunmap(gref->page);
+ kunmap_local(tmp);
}
if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(gref->notify.event);
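The kmap() to kmap_local_page() conversion in gntalloc is part of the move away from the global kmap pool: local mappings are cheaper, are scoped to the current thread, and must be undone with kunmap_local() on the returned address (not the page), in LIFO order when nested. Sketch:

#include <linux/highmem.h>
#include <linux/types.h>

static void example_clear_byte(struct page *page, unsigned int offset)
{
	u8 *va;

	va = kmap_local_page(page);	/* thread-local, cheap mapping */
	va[offset] = 0;
	kunmap_local(va);		/* takes the address, not the page */
}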
diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
index a8b41057c382..a39f2d36dd9c 100644
--- a/drivers/xen/unpopulated-alloc.c
+++ b/drivers/xen/unpopulated-alloc.c
@@ -230,39 +230,6 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);
-#ifdef CONFIG_XEN_PV
-static int __init init(void)
-{
- unsigned int i;
-
- if (!xen_domain())
- return -ENODEV;
-
- if (!xen_pv_domain())
- return 0;
-
- /*
- * Initialize with pages from the extra memory regions (see
- * arch/x86/xen/setup.c).
- */
- for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
- unsigned int j;
-
- for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
- struct page *pg =
- pfn_to_page(xen_extra_mem[i].start_pfn + j);
-
- pg->zone_device_data = page_list;
- page_list = pg;
- list_count++;
- }
- }
-
- return 0;
-}
-subsys_initcall(init);
-#endif
-
static int __init unpopulated_init(void)
{
int ret;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 6bcf1475511b..4763132ca57e 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -616,8 +616,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
_debug("write discard %x @%llx [%llx]", len, start, i_size);
/* The dirty region was entirely beyond the EOF. */
- fscache_clear_page_bits(afs_vnode_cache(vnode),
- mapping, start, len, caching);
+ fscache_clear_page_bits(mapping, start, len, caching);
afs_pages_written_back(vnode, start, len);
ret = 0;
}
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 6556e13ed95f..63c7ebb0da89 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1117,11 +1117,11 @@ out_free_interp:
* independently randomized mmap region (0 load_bias
* without MAP_FIXED nor MAP_FIXED_NOREPLACE).
*/
- alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
- if (interpreter || alignment > ELF_MIN_ALIGN) {
+ if (interpreter) {
load_bias = ELF_ET_DYN_BASE;
if (current->flags & PF_RANDOMIZE)
load_bias += arch_mmap_rnd();
+ alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
if (alignment)
load_bias &= ~(alignment - 1);
elf_flags |= MAP_FIXED_NOREPLACE;
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index c22d287e020b..0dd6de994199 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -2503,12 +2503,6 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
return ERR_PTR(ret);
}
- /*
- * New block group is likely to be used soon. Try to activate it now.
- * Failure is OK for now.
- */
- btrfs_zone_activate(cache);
-
ret = exclude_super_stripes(cache);
if (ret) {
/* We may have excluded something, so call this just in case */
@@ -2946,7 +2940,6 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
struct btrfs_path *path = NULL;
LIST_HEAD(dirty);
struct list_head *io = &cur_trans->io_bgs;
- int num_started = 0;
int loops = 0;
spin_lock(&cur_trans->dirty_bgs_lock);
@@ -3012,7 +3005,6 @@ again:
cache->io_ctl.inode = NULL;
ret = btrfs_write_out_cache(trans, cache, path);
if (ret == 0 && cache->io_ctl.inode) {
- num_started++;
should_put = 0;
/*
@@ -3113,7 +3105,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
int should_put;
struct btrfs_path *path;
struct list_head *io = &cur_trans->io_bgs;
- int num_started = 0;
path = btrfs_alloc_path();
if (!path)
@@ -3171,7 +3162,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
cache->io_ctl.inode = NULL;
ret = btrfs_write_out_cache(trans, cache, path);
if (ret == 0 && cache->io_ctl.inode) {
- num_started++;
should_put = 0;
list_add_tail(&cache->io_list, io);
} else {
@@ -3455,7 +3445,7 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
}
-static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
+static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
{
struct btrfs_block_group *bg;
int ret;
@@ -3542,7 +3532,11 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
out:
btrfs_trans_release_chunk_metadata(trans);
- return ret;
+ if (ret)
+ return ERR_PTR(ret);
+
+ btrfs_get_block_group(bg);
+ return bg;
}
/*
@@ -3657,10 +3651,17 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_space_info *space_info;
+ struct btrfs_block_group *ret_bg;
bool wait_for_alloc = false;
bool should_alloc = false;
+ bool from_extent_allocation = false;
int ret = 0;
+ if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
+ from_extent_allocation = true;
+ force = CHUNK_ALLOC_FORCE;
+ }
+
/* Don't re-enter if we're already allocating a chunk */
if (trans->allocating_chunk)
return -ENOSPC;
@@ -3750,9 +3751,22 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
force_metadata_allocation(fs_info);
}
- ret = do_chunk_alloc(trans, flags);
+ ret_bg = do_chunk_alloc(trans, flags);
trans->allocating_chunk = false;
+ if (IS_ERR(ret_bg)) {
+ ret = PTR_ERR(ret_bg);
+ } else if (from_extent_allocation) {
+ /*
+ * New block group is likely to be used soon. Try to activate
+ * it now. Failure is OK for now.
+ */
+ btrfs_zone_activate(ret_bg);
+ }
+
+ if (!ret)
+ btrfs_put_block_group(ret_bg);
+
spin_lock(&space_info->lock);
if (ret < 0) {
if (ret == -ENOSPC)
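The reworked do_chunk_alloc() above now returns the block group with an extra reference that the caller must drop (btrfs_get_block_group() before returning, btrfs_put_block_group() in btrfs_chunk_alloc() once done). That "return with a reference held" contract is standard kernel refcounting; a generic sketch of it using kref, with hypothetical names:

#include <linux/kref.h>
#include <linux/slab.h>

struct bg {
	struct kref refs;
	/* ... payload ... */
};

static void bg_release(struct kref *refs)
{
	kfree(container_of(refs, struct bg, refs));
}

/* returns the object with an extra reference the caller must put */
static struct bg *bg_lookup_and_get(struct bg *cached)
{
	kref_get(&cached->refs);
	return cached;
}

static void bg_use(struct bg *cached)
{
	struct bg *b = bg_lookup_and_get(cached);

	/* ... use b; it cannot be freed under us ... */
	kref_put(&b->refs, bg_release);	/* drop the lookup reference */
}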
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 93aabc68bb6a..e8308f2ad07d 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -35,11 +35,15 @@ enum btrfs_discard_state {
* the FS with empty chunks
*
* CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
+ * CHUNK_ALLOC_FORCE_FOR_EXTENT is like CHUNK_ALLOC_FORCE but is called
+ * from find_free_extent(), and additionally activates the zone
*/
enum btrfs_chunk_alloc_enum {
CHUNK_ALLOC_NO_FORCE,
CHUNK_ALLOC_LIMITED,
CHUNK_ALLOC_FORCE,
+ CHUNK_ALLOC_FORCE_FOR_EXTENT,
};
struct btrfs_caching_control {
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index be476f094300..19bf36d8ffea 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -537,6 +537,9 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
cb->orig_bio = NULL;
cb->nr_pages = nr_pages;
+ if (blkcg_css)
+ kthread_associate_blkcg(blkcg_css);
+
while (cur_disk_bytenr < disk_start + compressed_len) {
u64 offset = cur_disk_bytenr - disk_start;
unsigned int index = offset >> PAGE_SHIFT;
@@ -555,6 +558,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
bio = NULL;
goto finish_cb;
}
+ if (blkcg_css)
+ bio->bi_opf |= REQ_CGROUP_PUNT;
}
/*
* We should never reach next_stripe_start start as we will
@@ -612,6 +617,9 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
return 0;
finish_cb:
+ if (blkcg_css)
+ kthread_associate_blkcg(NULL);
+
if (bio) {
bio->bi_status = ret;
bio_endio(bio);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b7631b88426e..077c95e9baa5 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1060,6 +1060,7 @@ struct btrfs_fs_info {
*/
spinlock_t relocation_bg_lock;
u64 data_reloc_bg;
+ struct mutex zoned_data_reloc_io_lock;
u64 nr_global_roots;
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 71fd99b48283..f26202621989 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -734,7 +734,12 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
- /* Commit dev_replace state and reserve 1 item for it. */
+ /*
+ * Commit dev_replace state and reserve 1 item for it.
+ * This is crucial to ensure we won't miss copying extents for new block
+ * groups that are allocated after we started the device replace, and
+ * must be done after setting up the device replace state.
+ */
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index b30309f187cf..ed8e288cc369 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1850,9 +1850,10 @@ again:
ret = btrfs_insert_fs_root(fs_info, root);
if (ret) {
- btrfs_put_root(root);
- if (ret == -EEXIST)
+ if (ret == -EEXIST) {
+ btrfs_put_root(root);
goto again;
+ }
goto fail;
}
return root;
@@ -3156,6 +3157,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
mutex_init(&fs_info->reloc_mutex);
mutex_init(&fs_info->delalloc_root_mutex);
mutex_init(&fs_info->zoned_meta_io_lock);
+ mutex_init(&fs_info->zoned_data_reloc_io_lock);
seqlock_init(&fs_info->profiles_lock);
INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f477035a2ac2..6aa92f84f465 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4082,7 +4082,7 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
}
ret = btrfs_chunk_alloc(trans, ffe_ctl->flags,
- CHUNK_ALLOC_FORCE);
+ CHUNK_ALLOC_FORCE_FOR_EXTENT);
/* Do not bail out on ENOSPC since we can do more. */
if (ret == -ENOSPC)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 724e8fe06aa0..33c19f51d79b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2658,6 +2658,7 @@ int btrfs_repair_one_sector(struct inode *inode,
repair_bio = btrfs_bio_alloc(1);
repair_bbio = btrfs_bio(repair_bio);
+ repair_bbio->file_offset = start;
repair_bio->bi_opf = REQ_OP_READ;
repair_bio->bi_end_io = failed_bio->bi_end_io;
repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
@@ -3333,24 +3334,37 @@ static int alloc_new_bio(struct btrfs_inode *inode,
ret = calc_bio_boundaries(bio_ctrl, inode, file_offset);
if (ret < 0)
goto error;
- if (wbc) {
- struct block_device *bdev;
- bdev = fs_info->fs_devices->latest_dev->bdev;
- bio_set_dev(bio, bdev);
- wbc_init_bio(wbc, bio);
- }
- if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
- struct btrfs_device *device;
+ if (wbc) {
+ /*
+ * For zone append writes we need to set in the bio the correct
+ * block_device we are going to write to, so that the hardware
+ * limitation is respected. Look it up here:
+ */
+ if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ struct btrfs_device *dev;
+
+ dev = btrfs_zoned_get_device(fs_info, disk_bytenr,
+ fs_info->sectorsize);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto error;
+ }
- device = btrfs_zoned_get_device(fs_info, disk_bytenr,
- fs_info->sectorsize);
- if (IS_ERR(device)) {
- ret = PTR_ERR(device);
- goto error;
+ bio_set_dev(bio, dev->bdev);
+ } else {
+ /*
+ * Otherwise pick the last added device to support
+ * cgroup writeback. For multi-device file systems this
+ * means blk-cgroup policies have to always be set on the
+ * last added/replaced device. This is a bit odd but has
+ * been like that for a long time.
+ */
+ bio_set_dev(bio, fs_info->fs_devices->latest_dev->bdev);
}
-
- btrfs_bio(bio)->device = device;
+ wbc_init_bio(wbc, bio);
+ } else {
+ ASSERT(bio_op(bio) != REQ_OP_ZONE_APPEND);
}
return 0;
error:
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 17d5557f98ec..1c8a43ecfb9f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2016,8 +2016,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
* to use run_delalloc_nocow() here, like for regular
* preallocated inodes.
*/
- ASSERT(!zoned ||
- (zoned && btrfs_is_data_reloc_root(inode->root)));
+ ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root));
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, nr_written);
} else if (!inode_can_compress(inode) ||
@@ -7444,6 +7443,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
u64 block_start, orig_start, orig_block_len, ram_bytes;
bool can_nocow = false;
bool space_reserved = false;
+ u64 prev_len;
int ret = 0;
/*
@@ -7471,6 +7471,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
can_nocow = true;
}
+ prev_len = len;
if (can_nocow) {
struct extent_map *em2;
@@ -7500,8 +7501,6 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
goto out;
}
} else {
- const u64 prev_len = len;
-
/* Our caller expects us to free the input extent map. */
free_extent_map(em);
*map = NULL;
@@ -7532,7 +7531,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
* We have created our ordered extent, so we can now release our reservation
* for an outstanding extent.
*/
- btrfs_delalloc_release_extents(BTRFS_I(inode), len);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);
/*
* Need to update the i_size under the extent lock so buffered
@@ -7811,8 +7810,6 @@ static blk_status_t btrfs_check_read_dio_bio(struct btrfs_dio_private *dip,
const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
struct bio_vec bvec;
struct bvec_iter iter;
- const u64 orig_file_offset = dip->file_offset;
- u64 start = orig_file_offset;
u32 bio_offset = 0;
blk_status_t err = BLK_STS_OK;
@@ -7822,6 +7819,8 @@ static blk_status_t btrfs_check_read_dio_bio(struct btrfs_dio_private *dip,
nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
pgoff = bvec.bv_offset;
for (i = 0; i < nr_sectors; i++) {
+ u64 start = bbio->file_offset + bio_offset;
+
ASSERT(pgoff < PAGE_SIZE);
if (uptodate &&
(!csum || !check_data_csum(inode, bbio,
@@ -7834,17 +7833,13 @@ static blk_status_t btrfs_check_read_dio_bio(struct btrfs_dio_private *dip,
} else {
int ret;
- ASSERT((start - orig_file_offset) < UINT_MAX);
- ret = btrfs_repair_one_sector(inode,
- &bbio->bio,
- start - orig_file_offset,
- bvec.bv_page, pgoff,
+ ret = btrfs_repair_one_sector(inode, &bbio->bio,
+ bio_offset, bvec.bv_page, pgoff,
start, bbio->mirror_num,
submit_dio_repair_bio);
if (ret)
err = errno_to_blk_status(ret);
}
- start += sectorsize;
ASSERT(bio_offset + sectorsize > bio_offset);
bio_offset += sectorsize;
pgoff += sectorsize;
@@ -7871,6 +7866,7 @@ static blk_status_t btrfs_submit_bio_start_direct_io(struct inode *inode,
static void btrfs_end_dio_bio(struct bio *bio)
{
struct btrfs_dio_private *dip = bio->bi_private;
+ struct btrfs_bio *bbio = btrfs_bio(bio);
blk_status_t err = bio->bi_status;
if (err)
@@ -7881,12 +7877,12 @@ static void btrfs_end_dio_bio(struct bio *bio)
bio->bi_iter.bi_size, err);
if (bio_op(bio) == REQ_OP_READ)
- err = btrfs_check_read_dio_bio(dip, btrfs_bio(bio), !err);
+ err = btrfs_check_read_dio_bio(dip, bbio, !err);
if (err)
dip->dio_bio->bi_status = err;
- btrfs_record_physical_zoned(dip->inode, dip->file_offset, bio);
+ btrfs_record_physical_zoned(dip->inode, bbio->file_offset, bio);
bio_put(bio);
btrfs_dio_private_put(dip);
@@ -8047,6 +8043,7 @@ static void btrfs_submit_direct(const struct iomap_iter *iter,
bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len);
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
+ btrfs_bio(bio)->file_offset = file_offset;
if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
status = extract_ordered_extent(BTRFS_I(inode), bio,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index f46e71061942..be6c24577dbe 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -5456,8 +5456,6 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_fs_info(fs_info, argp);
case BTRFS_IOC_DEV_INFO:
return btrfs_ioctl_dev_info(fs_info, argp);
- case BTRFS_IOC_BALANCE:
- return btrfs_ioctl_balance(file, NULL);
case BTRFS_IOC_TREE_SEARCH:
return btrfs_ioctl_tree_search(inode, argp);
case BTRFS_IOC_TREE_SEARCH_V2:
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 11089568b287..8cd713d37ad2 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3699,6 +3699,31 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
if (!cache)
goto skip;
+ ASSERT(cache->start <= chunk_offset);
+ /*
+ * We are using the commit root to search for device extents, so
+ * that means we could have found a device extent item from a
+ * block group that was deleted in the current transaction. The
+ * logical start offset of the deleted block group, stored at
+ * @chunk_offset, might be part of the logical address range of
+ * a new block group (which uses different physical extents).
+ * In this case btrfs_lookup_block_group() has returned the new
+ * block group, and its start address is less than @chunk_offset.
+ *
+ * We skip such new block groups: processing them is pointless, as
+ * we won't find their extents anyway, since we search for them
+ * using the commit root of the extent tree. For a device
+ * replace it's also fine to skip it: we won't miss copying them
+ * to the target device because we have the write duplication
+ * setup through the regular write path (by btrfs_map_block()),
+ * and we have committed a transaction when we started the device
+ * replace, right after setting up the device replace state.
+ */
+ if (cache->start < chunk_offset) {
+ btrfs_put_block_group(cache);
+ goto skip;
+ }
+
if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
spin_lock(&cache->lock);
if (!cache->to_copy) {
@@ -3822,7 +3847,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
dev_replace->item_needs_writeback = 1;
up_write(&dev_replace->rwsem);
- ASSERT(cache->start == chunk_offset);
ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
dev_extent_len);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 571dae8ad65e..09e4f1a04e6f 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3188,6 +3188,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
if (ret) {
mutex_unlock(&fs_info->tree_root->log_mutex);
+ blk_finish_plug(&plug);
goto out;
}
}
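The blk_finish_plug() added to this error path fixes an unbalanced plug: blk_start_plug() hooks the plug onto the current task, and every return path afterwards must call blk_finish_plug() to flush the batched I/O and unhook it. A sketch of the required structure:

#include <linux/blkdev.h>

static int example_batched_io(void)
{
	struct blk_plug plug;
	int ret = 0;

	blk_start_plug(&plug);	/* batch bio submissions on this task */

	/* ... submit I/O; set ret on failure ... */
	if (ret) {
		blk_finish_plug(&plug);	/* must be unwound on errors too */
		return ret;
	}

	blk_finish_plug(&plug);	/* flushes the batched requests */
	return 0;
}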
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 2cfbc74a3b4e..a8cc736731fd 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4430,10 +4430,12 @@ static int balance_kthread(void *data)
struct btrfs_fs_info *fs_info = data;
int ret = 0;
+ sb_start_write(fs_info->sb);
mutex_lock(&fs_info->balance_mutex);
if (fs_info->balance_ctl)
ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
mutex_unlock(&fs_info->balance_mutex);
+ sb_end_write(fs_info->sb);
return ret;
}
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index bd297f23d19e..f3e28f11cfb6 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -328,6 +328,9 @@ struct btrfs_fs_devices {
struct btrfs_bio {
unsigned int mirror_num;
+ /* for direct I/O */
+ u64 file_offset;
+
/* @device is for stripe IO submission. */
struct btrfs_device *device;
u8 *csum;
diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
index cbf016a7bb5d..6dee76248cb4 100644
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -359,7 +359,7 @@ static inline void btrfs_zoned_data_reloc_lock(struct btrfs_inode *inode)
struct btrfs_root *root = inode->root;
if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info))
- btrfs_inode_lock(&inode->vfs_inode, 0);
+ mutex_lock(&root->fs_info->zoned_data_reloc_io_lock);
}
static inline void btrfs_zoned_data_reloc_unlock(struct btrfs_inode *inode)
@@ -367,7 +367,7 @@ static inline void btrfs_zoned_data_reloc_unlock(struct btrfs_inode *inode)
struct btrfs_root *root = inode->root;
if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info))
- btrfs_inode_unlock(&inode->vfs_inode, 0);
+ mutex_unlock(&root->fs_info->zoned_data_reloc_io_lock);
}
#endif
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index f256c8aff7bb..ca9f3e4ec4b3 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -57,6 +57,16 @@ static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
trace_cachefiles_mark_inactive(object, inode);
}
+static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object,
+ struct dentry *dentry)
+{
+ struct inode *inode = d_backing_inode(dentry);
+
+ inode_lock(inode);
+ __cachefiles_unmark_inode_in_use(object, dentry);
+ inode_unlock(inode);
+}
+
/*
* Unmark a backing inode and tell cachefilesd that there's something that can
* be culled.
@@ -68,9 +78,7 @@ void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
struct inode *inode = file_inode(file);
if (inode) {
- inode_lock(inode);
- __cachefiles_unmark_inode_in_use(object, file->f_path.dentry);
- inode_unlock(inode);
+ cachefiles_do_unmark_inode_in_use(object, file->f_path.dentry);
if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
atomic_long_add(inode->i_blocks, &cache->b_released);
@@ -484,7 +492,7 @@ struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
object, d_backing_inode(path.dentry), ret,
cachefiles_trace_trunc_error);
file = ERR_PTR(ret);
- goto out_dput;
+ goto out_unuse;
}
}
@@ -494,15 +502,20 @@ struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
trace_cachefiles_vfs_error(object, d_backing_inode(path.dentry),
PTR_ERR(file),
cachefiles_trace_open_error);
- goto out_dput;
+ goto out_unuse;
}
if (unlikely(!file->f_op->read_iter) ||
unlikely(!file->f_op->write_iter)) {
fput(file);
pr_notice("Cache does not support read_iter and write_iter\n");
file = ERR_PTR(-EINVAL);
+ goto out_unuse;
}
+ goto out_dput;
+
+out_unuse:
+ cachefiles_do_unmark_inode_in_use(object, path.dentry);
out_dput:
dput(path.dentry);
out:
@@ -590,14 +603,16 @@ static bool cachefiles_open_file(struct cachefiles_object *object,
check_failed:
fscache_cookie_lookup_negative(object->cookie);
cachefiles_unmark_inode_in_use(object, file);
- if (ret == -ESTALE) {
- fput(file);
- dput(dentry);
+ fput(file);
+ dput(dentry);
+ if (ret == -ESTALE)
return cachefiles_create_file(object);
- }
+ return false;
+
error_fput:
fput(file);
error:
+ cachefiles_do_unmark_inode_in_use(object, dentry);
dput(dentry);
return false;
}
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index 35465109d9c4..00b087c14995 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -203,7 +203,7 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
if (!buf)
return false;
buf->reserved = cpu_to_be32(0);
- memcpy(buf->data, p, len);
+ memcpy(buf->data, p, volume->vcookie->coherency_len);
ret = cachefiles_inject_write_error();
if (ret == 0)
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index f1ad6884d4da..5c14ef04e474 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2274,6 +2274,8 @@ retry:
list_for_each_entry(req, &ci->i_unsafe_dirops,
r_unsafe_dir_item) {
s = req->r_session;
+ if (!s)
+ continue;
if (unlikely(s->s_mds >= max_sessions)) {
spin_unlock(&ci->i_unsafe_lock);
for (i = 0; i < max_sessions; i++) {
@@ -2294,6 +2296,8 @@ retry:
list_for_each_entry(req, &ci->i_unsafe_iops,
r_unsafe_target_item) {
s = req->r_session;
+ if (!s)
+ continue;
if (unlikely(s->s_mds >= max_sessions)) {
spin_unlock(&ci->i_unsafe_lock);
for (i = 0; i < max_sessions; i++) {
@@ -3870,6 +3874,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n",
inode, ci, mds, mseq, target);
retry:
+ down_read(&mdsc->snap_rwsem);
spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id))
@@ -3933,6 +3938,7 @@ retry:
}
spin_unlock(&ci->i_ceph_lock);
+ up_read(&mdsc->snap_rwsem);
mutex_unlock(&session->s_mutex);
/* open target session */
@@ -3958,6 +3964,7 @@ retry:
out_unlock:
spin_unlock(&ci->i_ceph_lock);
+ up_read(&mdsc->snap_rwsem);
mutex_unlock(&session->s_mutex);
if (tsession) {
mutex_unlock(&tsession->s_mutex);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index fa38c013126d..00c3de177dd6 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -4434,8 +4434,6 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc)
bool check_session_state(struct ceph_mds_session *s)
{
- struct ceph_fs_client *fsc = s->s_mdsc->fsc;
-
switch (s->s_state) {
case CEPH_MDS_SESSION_OPEN:
if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
@@ -4444,10 +4442,6 @@ bool check_session_state(struct ceph_mds_session *s)
}
break;
case CEPH_MDS_SESSION_CLOSING:
- /* Should never reach this when not force unmounting */
- WARN_ON_ONCE(s->s_ttl &&
- READ_ONCE(fsc->mount_state) != CEPH_MOUNT_SHUTDOWN);
- fallthrough;
case CEPH_MDS_SESSION_NEW:
case CEPH_MDS_SESSION_RESTARTING:
case CEPH_MDS_SESSION_CLOSED:
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index a47fa44b6d52..2b1a1c029c75 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -266,22 +266,24 @@ static void cifs_kill_sb(struct super_block *sb)
* before we kill the sb.
*/
if (cifs_sb->root) {
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+ tcon = tlink_tcon(tlink);
+ if (IS_ERR(tcon))
+ continue;
+ cfid = &tcon->crfid;
+ mutex_lock(&cfid->fid_mutex);
+ if (cfid->dentry) {
+ dput(cfid->dentry);
+ cfid->dentry = NULL;
+ }
+ mutex_unlock(&cfid->fid_mutex);
+ }
+
+ /* finally release root dentry */
dput(cifs_sb->root);
cifs_sb->root = NULL;
}
- node = rb_first(root);
- while (node != NULL) {
- tlink = rb_entry(node, struct tcon_link, tl_rbnode);
- tcon = tlink_tcon(tlink);
- cfid = &tcon->crfid;
- mutex_lock(&cfid->fid_mutex);
- if (cfid->dentry) {
- dput(cfid->dentry);
- cfid->dentry = NULL;
- }
- mutex_unlock(&cfid->fid_mutex);
- node = rb_next(node);
- }
kill_anon_super(sb);
cifs_umount(cifs_sb);
@@ -944,7 +946,7 @@ cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
ssize_t rc;
struct inode *inode = file_inode(iocb->ki_filp);
- if (iocb->ki_filp->f_flags & O_DIRECT)
+ if (iocb->ki_flags & IOCB_DIRECT)
return cifs_user_readv(iocb, iter);
rc = cifs_revalidate_mapping(inode);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 54155eb4faac..42e14f408856 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -534,12 +534,19 @@ int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
{
/* If tcp session is not an dfs connection, then reconnect to last target server */
spin_lock(&cifs_tcp_ses_lock);
- if (!server->is_dfs_conn || !server->origin_fullpath || !server->leaf_fullpath) {
+ if (!server->is_dfs_conn) {
spin_unlock(&cifs_tcp_ses_lock);
return __cifs_reconnect(server, mark_smb_session);
}
spin_unlock(&cifs_tcp_ses_lock);
+ mutex_lock(&server->refpath_lock);
+ if (!server->origin_fullpath || !server->leaf_fullpath) {
+ mutex_unlock(&server->refpath_lock);
+ return __cifs_reconnect(server, mark_smb_session);
+ }
+ mutex_unlock(&server->refpath_lock);
+
return reconnect_dfs_server(server);
}
#else
@@ -1049,7 +1056,7 @@ smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
- trace_smb3_add_credits(server->CurrentMid,
+ trace_smb3_hdr_credits(server->CurrentMid,
server->conn_id, server->hostname, scredits,
le16_to_cpu(shdr->CreditRequest), in_flight);
cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
@@ -3675,9 +3682,11 @@ static void setup_server_referral_paths(struct mount_ctx *mnt_ctx)
{
struct TCP_Server_Info *server = mnt_ctx->server;
+ mutex_lock(&server->refpath_lock);
server->origin_fullpath = mnt_ctx->origin_fullpath;
server->leaf_fullpath = mnt_ctx->leaf_fullpath;
server->current_fullpath = mnt_ctx->leaf_fullpath;
+ mutex_unlock(&server->refpath_lock);
mnt_ctx->origin_fullpath = mnt_ctx->leaf_fullpath = NULL;
}
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 30e040da4f09..956f8e5cf3e7 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -1422,12 +1422,14 @@ static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool
struct TCP_Server_Info *server = tcon->ses->server;
mutex_lock(&server->refpath_lock);
- if (strcasecmp(server->leaf_fullpath, server->origin_fullpath))
- __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, force_refresh);
+ if (server->origin_fullpath) {
+ if (server->leaf_fullpath && strcasecmp(server->leaf_fullpath,
+ server->origin_fullpath))
+ __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, force_refresh);
+ __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, force_refresh);
+ }
mutex_unlock(&server->refpath_lock);
- __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, force_refresh);
-
return 0;
}
@@ -1530,11 +1532,14 @@ static void refresh_mounts(struct cifs_ses **sessions)
list_del_init(&tcon->ulist);
mutex_lock(&server->refpath_lock);
- if (strcasecmp(server->leaf_fullpath, server->origin_fullpath))
- __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, false);
+ if (server->origin_fullpath) {
+ if (server->leaf_fullpath && strcasecmp(server->leaf_fullpath,
+ server->origin_fullpath))
+ __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, false);
+ __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, false);
+ }
mutex_unlock(&server->refpath_lock);
- __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, false);
cifs_put_tcon(tcon);
}
}
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 852e54ee82c2..bbdf3281559c 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -85,6 +85,9 @@ parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len,
if (rc != 1)
return -EINVAL;
+ if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
+ return -EINVAL;
+
rc = symlink_hash(link_len, link_str, md5_hash);
if (rc) {
cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index db23f5b404ba..d6aaeff4a30a 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -86,6 +86,9 @@ smb2_add_credits(struct TCP_Server_Info *server,
if (*val > 65000) {
*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
pr_warn_once("server overflowed SMB3 credits\n");
+ trace_smb3_overflow_credits(server->CurrentMid,
+ server->conn_id, server->hostname, *val,
+ add, server->in_flight);
}
server->in_flight--;
if (server->in_flight == 0 &&
@@ -251,7 +254,7 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
in_flight = server->in_flight;
spin_unlock(&server->req_lock);
- trace_smb3_add_credits(server->CurrentMid,
+ trace_smb3_wait_credits(server->CurrentMid,
server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
__func__, credits->value, scredits);
@@ -300,7 +303,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
- trace_smb3_add_credits(server->CurrentMid,
+ trace_smb3_adj_credits(server->CurrentMid,
server->conn_id, server->hostname, scredits,
credits->value - new_val, in_flight);
cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
@@ -1855,9 +1858,17 @@ smb2_copychunk_range(const unsigned int xid,
int chunks_copied = 0;
bool chunk_sizes_updated = false;
ssize_t bytes_written, total_bytes_written = 0;
+ struct inode *inode;
pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
+ /*
+ * We need to flush all unwritten data before we can send the
+ * copychunk ioctl to the server.
+ */
+ inode = d_inode(trgtfile->dentry);
+ filemap_write_and_wait(inode->i_mapping);
+
if (pcchunk == NULL)
return -ENOMEM;
@@ -2492,7 +2503,7 @@ smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
- trace_smb3_add_credits(server->CurrentMid,
+ trace_smb3_pend_credits(server->CurrentMid,
server->conn_id, server->hostname, scredits,
le16_to_cpu(shdr->CreditRequest), in_flight);
cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n",
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index 6cecf302dcfd..bc279616c513 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -1006,6 +1006,13 @@ DEFINE_SMB3_CREDIT_EVENT(credit_timeout);
DEFINE_SMB3_CREDIT_EVENT(insufficient_credits);
DEFINE_SMB3_CREDIT_EVENT(too_many_credits);
DEFINE_SMB3_CREDIT_EVENT(add_credits);
+DEFINE_SMB3_CREDIT_EVENT(adj_credits);
+DEFINE_SMB3_CREDIT_EVENT(hdr_credits);
+DEFINE_SMB3_CREDIT_EVENT(nblk_credits);
+DEFINE_SMB3_CREDIT_EVENT(pend_credits);
+DEFINE_SMB3_CREDIT_EVENT(wait_credits);
+DEFINE_SMB3_CREDIT_EVENT(waitff_credits);
+DEFINE_SMB3_CREDIT_EVENT(overflow_credits);
DEFINE_SMB3_CREDIT_EVENT(set_credits);
#endif /* _CIFS_TRACE_H */
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index eeb1a699bd6f..c667e6ddfe2f 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -464,13 +464,12 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
return -EIO;
}
- tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
+ tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
if (!tr_hdr)
return -ENOMEM;
memset(&cur_rqst[0], 0, sizeof(cur_rqst));
memset(&iov, 0, sizeof(iov));
- memset(tr_hdr, 0, sizeof(*tr_hdr));
iov.iov_base = tr_hdr;
iov.iov_len = sizeof(*tr_hdr);
@@ -542,7 +541,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
in_flight = server->in_flight;
spin_unlock(&server->req_lock);
- trace_smb3_add_credits(server->CurrentMid,
+ trace_smb3_nblk_credits(server->CurrentMid,
server->conn_id, server->hostname, scredits, -1, in_flight);
cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
__func__, 1, scredits);
@@ -648,7 +647,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
in_flight = server->in_flight;
spin_unlock(&server->req_lock);
- trace_smb3_add_credits(server->CurrentMid,
+ trace_smb3_waitff_credits(server->CurrentMid,
server->conn_id, server->hostname, scredits,
-(num_credits), in_flight);
cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 0ed880f42525..e6dea6dfca16 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -1066,12 +1066,9 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
/* wake up the caller thread for sync decompression */
if (sync) {
- unsigned long flags;
-
- spin_lock_irqsave(&io->u.wait.lock, flags);
if (!atomic_add_return(bios, &io->pending_bios))
- wake_up_locked(&io->u.wait);
- spin_unlock_irqrestore(&io->u.wait.lock, flags);
+ complete(&io->u.done);
+
return;
}
@@ -1217,7 +1214,7 @@ jobqueue_init(struct super_block *sb,
} else {
fg_out:
q = fgq;
- init_waitqueue_head(&fgq->u.wait);
+ init_completion(&fgq->u.done);
atomic_set(&fgq->pending_bios, 0);
}
q->sb = sb;
@@ -1419,8 +1416,7 @@ static void z_erofs_runqueue(struct super_block *sb,
return;
/* wait until all bios are completed */
- io_wait_event(io[JQ_SUBMIT].u.wait,
- !atomic_read(&io[JQ_SUBMIT].pending_bios));
+ wait_for_completion_io(&io[JQ_SUBMIT].u.done);
/* handle synchronous decompress queue in the caller context */
z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
index e043216b545f..800b11c53f57 100644
--- a/fs/erofs/zdata.h
+++ b/fs/erofs/zdata.h
@@ -97,7 +97,7 @@ struct z_erofs_decompressqueue {
z_erofs_next_pcluster_t head;
union {
- wait_queue_head_t wait;
+ struct completion done;
struct work_struct work;
} u;
};
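The erofs change replaces an open-coded waitqueue handshake with a struct completion, which packages the same "counter hit zero, wake the sleeper" pattern without manual waitqueue locking. A sketch of the two sides, with hypothetical names:

#include <linux/atomic.h>
#include <linux/completion.h>

struct example_queue {
	atomic_t pending;
	struct completion done;
};

static void example_queue_init(struct example_queue *q, int nr)
{
	atomic_set(&q->pending, nr);
	init_completion(&q->done);
}

/* called once per finished unit, e.g. from a bio end_io handler */
static void example_one_done(struct example_queue *q)
{
	if (!atomic_dec_return(&q->pending))
		complete(&q->done);	/* the last unit wakes the waiter */
}

/* called from the submitting context */
static void example_wait_all(struct example_queue *q)
{
	wait_for_completion_io(&q->done);	/* sleep accounted as iowait */
}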
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 3f87cca49f0c..a743b1e3b89e 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2273,6 +2273,10 @@ static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi)
* Structure of a directory entry
*/
#define EXT4_NAME_LEN 255
+/*
+ * Base length of the ext4 directory entry excluding the name length
+ */
+#define EXT4_BASE_DIR_LEN (sizeof(struct ext4_dir_entry_2) - EXT4_NAME_LEN)
struct ext4_dir_entry {
__le32 inode; /* Inode number */
@@ -3032,7 +3036,7 @@ extern int ext4_inode_attach_jinode(struct inode *inode);
extern int ext4_can_truncate(struct inode *inode);
extern int ext4_truncate(struct inode *);
extern int ext4_break_layouts(struct inode *);
-extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
+extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
extern void ext4_set_inode_flags(struct inode *, bool init);
extern int ext4_alloc_da_blocks(struct inode *inode);
extern void ext4_set_aops(struct inode *inode);
@@ -3064,6 +3068,7 @@ int ext4_fileattr_set(struct user_namespace *mnt_userns,
struct dentry *dentry, struct fileattr *fa);
int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa);
extern void ext4_reset_inode_seed(struct inode *inode);
+int ext4_update_overhead(struct super_block *sb);
/* migrate.c */
extern int ext4_ext_migrate(struct inode *);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0d98cf402282..e473fde6b64b 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4500,9 +4500,9 @@ retry:
return ret > 0 ? ret2 : ret;
}
-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
+static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);
-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
+static int ext4_insert_range(struct file *file, loff_t offset, loff_t len);
static long ext4_zero_range(struct file *file, loff_t offset,
loff_t len, int mode)
@@ -4574,6 +4574,10 @@ static long ext4_zero_range(struct file *file, loff_t offset,
/* Wait all existing dio workers, newcomers will block on i_rwsem */
inode_dio_wait(inode);
+ ret = file_modified(file);
+ if (ret)
+ goto out_mutex;
+
/* Preallocate the range including the unaligned edges */
if (partial_begin || partial_end) {
ret = ext4_alloc_file_blocks(file,
@@ -4690,7 +4694,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
return -EOPNOTSUPP;
if (mode & FALLOC_FL_PUNCH_HOLE) {
- ret = ext4_punch_hole(inode, offset, len);
+ ret = ext4_punch_hole(file, offset, len);
goto exit;
}
@@ -4699,12 +4703,12 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
goto exit;
if (mode & FALLOC_FL_COLLAPSE_RANGE) {
- ret = ext4_collapse_range(inode, offset, len);
+ ret = ext4_collapse_range(file, offset, len);
goto exit;
}
if (mode & FALLOC_FL_INSERT_RANGE) {
- ret = ext4_insert_range(inode, offset, len);
+ ret = ext4_insert_range(file, offset, len);
goto exit;
}
@@ -4740,6 +4744,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
/* Wait for all existing dio workers; newcomers will block on i_rwsem */
inode_dio_wait(inode);
+ ret = file_modified(file);
+ if (ret)
+ goto out;
+
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
if (ret)
goto out;
@@ -5241,8 +5249,9 @@ out:
* This implements fallocate's collapse-range functionality for ext4.
* Returns: 0 on success, non-zero on error.
*/
-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
{
+ struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct address_space *mapping = inode->i_mapping;
ext4_lblk_t punch_start, punch_stop;
@@ -5294,6 +5303,10 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
/* Wait for existing dio to complete */
inode_dio_wait(inode);
+ ret = file_modified(file);
+ if (ret)
+ goto out_mutex;
+
/*
* Prevent page faults from reinstantiating pages we have released from
* page cache.
@@ -5387,8 +5400,9 @@ out_mutex:
* by len bytes.
* Returns 0 on success, error otherwise.
*/
-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
+static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
{
+ struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct address_space *mapping = inode->i_mapping;
handle_t *handle;
@@ -5445,6 +5459,10 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
/* Wait for existing dio to complete */
inode_dio_wait(inode);
+ ret = file_modified(file);
+ if (ret)
+ goto out_mutex;
+
/*
* Prevent page faults from reinstantiating pages we have released from
* page cache.
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 13740f2d0e61..646ece9b3455 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3953,12 +3953,14 @@ int ext4_break_layouts(struct inode *inode)
* Returns: 0 on success or negative on failure
*/
-int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
{
+ struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
ext4_lblk_t first_block, stop_block;
struct address_space *mapping = inode->i_mapping;
- loff_t first_block_offset, last_block_offset;
+ loff_t first_block_offset, last_block_offset, max_length;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
handle_t *handle;
unsigned int credits;
int ret = 0, ret2 = 0;
@@ -4001,6 +4003,14 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
offset;
}
+ /*
+ * For a punch hole, offset + length must end at least one block
+ * below s_bitmap_maxbytes. Adjust the length if it goes beyond
+ * that limit.
+ */
+ max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
+ if (offset + length > max_length)
+ length = max_length - offset;
+
if (offset & (sb->s_blocksize - 1) ||
(offset + length) & (sb->s_blocksize - 1)) {
/*
@@ -4016,6 +4026,10 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
/* Wait for all existing dio workers; newcomers will block on i_rwsem */
inode_dio_wait(inode);
+ ret = file_modified(file);
+ if (ret)
+ goto out_mutex;
+
/*
* Prevent page faults from reinstantiating pages we have released from
* page cache.
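The clamp added to ext4_punch_hole() keeps offset + length from running past the highest byte addressable through the block bitmap. A small worked example of the arithmetic, with illustrative values in place of the real superblock fields:

#include <stdio.h>

int main(void)
{
	/* Illustrative values: 4 KiB blocks, 2^44 bytes of bitmap-mapped file. */
	unsigned long long s_bitmap_maxbytes = 1ULL << 44;
	unsigned long long blocksize = 4096;
	unsigned long long max_length = s_bitmap_maxbytes - blocksize;

	unsigned long long offset = max_length - 100;
	unsigned long long length = 4096;	/* would overrun the limit */

	if (offset + length > max_length)
		length = max_length - offset;	/* clamped to 100 bytes */

	printf("clamped length = %llu\n", length);
	return 0;
}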
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 992229ca2d83..ba44fa1be70a 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -1652,3 +1652,19 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ext4_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
+
+static void set_overhead(struct ext4_super_block *es, const void *arg)
+{
+ es->s_overhead_clusters = cpu_to_le32(*((unsigned long *) arg));
+}
+
+int ext4_update_overhead(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+ if (sb_rdonly(sb) || sbi->s_overhead == 0 ||
+ sbi->s_overhead == le32_to_cpu(sbi->s_es->s_overhead_clusters))
+ return 0;
+
+ return ext4_update_superblocks_fn(sb, set_overhead, &sbi->s_overhead);
+}
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index e37da8d5cd0c..767b4bfe39c3 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1466,10 +1466,10 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
de = (struct ext4_dir_entry_2 *)search_buf;
dlimit = search_buf + buf_size;
- while ((char *) de < dlimit) {
+ while ((char *) de < dlimit - EXT4_BASE_DIR_LEN) {
/* this code is executed quadratically often */
/* do minimal checking `by hand' */
- if ((char *) de + de->name_len <= dlimit &&
+ if (de->name + de->name_len <= dlimit &&
ext4_match(dir, fname, de)) {
/* found a match - just to be sure, do
* a full check */
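The ext4_search_dir() hunk tightens the walk in two ways: the loop now stops EXT4_BASE_DIR_LEN bytes before the buffer end, so reading the fixed-size entry header can never overrun, and the name bound is measured from de->name rather than from the start of the entry. A hedged userspace sketch of the same bounds discipline (struct layout simplified):

#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN 255

struct dir_entry {			/* simplified ext4_dir_entry_2 */
	unsigned int   inode;
	unsigned short rec_len;
	unsigned char  name_len;
	unsigned char  file_type;
	char name[NAME_MAX_LEN];
};

/* Header size without the variable-length name, like EXT4_BASE_DIR_LEN. */
#define BASE_DIR_LEN (sizeof(struct dir_entry) - NAME_MAX_LEN)

static int search(char *buf, size_t buf_size, const char *want)
{
	char *dlimit = buf + buf_size;
	struct dir_entry *de = (struct dir_entry *)buf;

	/* Stop early enough that the fixed header is always in bounds. */
	while ((char *)de < dlimit - BASE_DIR_LEN) {
		/* Bound the name from de->name, not from the entry start. */
		if (de->name + de->name_len <= dlimit &&
		    de->name_len == strlen(want) &&
		    !memcmp(de->name, want, de->name_len))
			return 1;
		de = (struct dir_entry *)((char *)de + de->rec_len);
	}
	return 0;
}

int main(void)
{
	_Alignas(struct dir_entry) char buf[512] = { 0 };
	struct dir_entry *de = (struct dir_entry *)buf;

	de->inode = 12;
	de->rec_len = sizeof(buf);	/* one entry spans the block */
	de->name_len = 3;
	memcpy(de->name, "foo", 3);
	printf("found: %d\n", search(buf, sizeof(buf), "foo"));
	return 0;
}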
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 495ce59fb4ad..14695e2b5042 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -134,8 +134,10 @@ static void ext4_finish_bio(struct bio *bio)
continue;
}
clear_buffer_async_write(bh);
- if (bio->bi_status)
+ if (bio->bi_status) {
+ set_buffer_write_io_error(bh);
buffer_io_error(bh);
+ }
} while ((bh = bh->b_this_page) != head);
spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
if (!under_io) {
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 81749eaddf4c..1466fbdbc8e3 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1199,20 +1199,25 @@ static void ext4_put_super(struct super_block *sb)
int aborted = 0;
int i, err;
- ext4_unregister_li_request(sb);
- ext4_quota_off_umount(sb);
-
- flush_work(&sbi->s_error_work);
- destroy_workqueue(sbi->rsv_conversion_wq);
- ext4_release_orphan_info(sb);
-
/*
* Unregister sysfs before destroying jbd2 journal.
* Since we could still access attr_journal_task attribute via sysfs
* path which could have sbi->s_journal->j_task as NULL
+ * Unregister sysfs before flushing sbi->s_error_work.
+ * Since a user may read /proc/fs/ext4/xx/mb_groups during umount,
+ * a failed metadata read can queue error work, and
+ * flush_stashed_error_work() would then call start_this_handle(),
+ * which may trigger a BUG_ON.
*/
ext4_unregister_sysfs(sb);
+ ext4_unregister_li_request(sb);
+ ext4_quota_off_umount(sb);
+
+ flush_work(&sbi->s_error_work);
+ destroy_workqueue(sbi->rsv_conversion_wq);
+ ext4_release_orphan_info(sb);
+
if (sbi->s_journal) {
aborted = is_journal_aborted(sbi->s_journal);
err = jbd2_journal_destroy(sbi->s_journal);
@@ -4172,9 +4177,11 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
ext4_fsblk_t first_block, last_block, b;
ext4_group_t i, ngroups = ext4_get_groups_count(sb);
int s, j, count = 0;
+ int has_super = ext4_bg_has_super(sb, grp);
if (!ext4_has_feature_bigalloc(sb))
- return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
+ return (has_super + ext4_bg_num_gdb(sb, grp) +
+ (has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
sbi->s_itb_per_group + 2);
first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
@@ -5282,9 +5289,18 @@ no_journal:
* Get the # of file system overhead blocks from the
* superblock if present.
*/
- if (es->s_overhead_clusters)
- sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
- else {
+ sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
+ /* ignore the precalculated value if it is ridiculous */
+ if (sbi->s_overhead > ext4_blocks_count(es))
+ sbi->s_overhead = 0;
+ /*
+ * If the bigalloc feature is not enabled recalculating the
+ * overhead doesn't take long, so we might as well just redo
+ * it to make sure we are using the correct value.
+ */
+ if (!ext4_has_feature_bigalloc(sb))
+ sbi->s_overhead = 0;
+ if (sbi->s_overhead == 0) {
err = ext4_calculate_overhead(sb);
if (err)
goto failed_mount_wq;
@@ -5602,6 +5618,8 @@ static int ext4_fill_super(struct super_block *sb, struct fs_context *fc)
ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
"Quota mode: %s.", descr, ext4_quota_mode(sb));
+ /* Update the s_overhead_clusters if necessary */
+ ext4_update_overhead(sb);
return 0;
free_sbi:
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index f5366feea82d..909085a78f9c 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -98,9 +98,9 @@ repeat:
}
if (unlikely(!PageUptodate(page))) {
- if (page->index == sbi->metapage_eio_ofs &&
- sbi->metapage_eio_cnt++ == MAX_RETRY_META_PAGE_EIO) {
- set_ckpt_flags(sbi, CP_ERROR_FLAG);
+ if (page->index == sbi->metapage_eio_ofs) {
+ if (sbi->metapage_eio_cnt++ == MAX_RETRY_META_PAGE_EIO)
+ set_ckpt_flags(sbi, CP_ERROR_FLAG);
} else {
sbi->metapage_eio_ofs = page->index;
sbi->metapage_eio_cnt = 0;
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 8e0c2e773c8d..9a1a526f2092 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -388,11 +388,23 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
return 0;
}
-static void __attach_io_flag(struct f2fs_io_info *fio, unsigned int io_flag)
+static unsigned int f2fs_io_flags(struct f2fs_io_info *fio)
{
unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
- unsigned int fua_flag = io_flag & temp_mask;
- unsigned int meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
+ unsigned int fua_flag, meta_flag, io_flag;
+ unsigned int op_flags = 0;
+
+ if (fio->op != REQ_OP_WRITE)
+ return 0;
+ if (fio->type == DATA)
+ io_flag = fio->sbi->data_io_flag;
+ else if (fio->type == NODE)
+ io_flag = fio->sbi->node_io_flag;
+ else
+ return 0;
+
+ fua_flag = io_flag & temp_mask;
+ meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
/*
* data/node io flag bits per temp:
@@ -401,9 +413,10 @@ static void __attach_io_flag(struct f2fs_io_info *fio, unsigned int io_flag)
* Cold | Warm | Hot | Cold | Warm | Hot |
*/
if ((1 << fio->temp) & meta_flag)
- fio->op_flags |= REQ_META;
+ op_flags |= REQ_META;
if ((1 << fio->temp) & fua_flag)
- fio->op_flags |= REQ_FUA;
+ op_flags |= REQ_FUA;
+ return op_flags;
}
static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
@@ -413,14 +426,10 @@ static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
sector_t sector;
struct bio *bio;
- if (fio->type == DATA)
- __attach_io_flag(fio, sbi->data_io_flag);
- else if (fio->type == NODE)
- __attach_io_flag(fio, sbi->node_io_flag);
-
bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector);
- bio = bio_alloc_bioset(bdev, npages, fio->op | fio->op_flags, GFP_NOIO,
- &f2fs_bioset);
+ bio = bio_alloc_bioset(bdev, npages,
+ fio->op | fio->op_flags | f2fs_io_flags(fio),
+ GFP_NOIO, &f2fs_bioset);
bio->bi_iter.bi_sector = sector;
if (is_read_io(fio->op)) {
bio->bi_end_io = f2fs_read_end_io;
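The f2fs refactor turns __attach_io_flag() into a pure helper: f2fs_io_flags() reads the per-type sysfs word, whose low NR_TEMP_TYPE bits select REQ_FUA per temperature and the next NR_TEMP_TYPE bits select REQ_META, and returns the derived flags instead of mutating fio->op_flags. A userspace sketch of the decode (constants are illustrative stand-ins):

#include <stdio.h>

#define NR_TEMP_TYPE 3			/* HOT, WARM, COLD */
enum temp_type { HOT, WARM, COLD };

#define REQ_META (1u << 0)		/* stand-ins for the block-layer flags */
#define REQ_FUA  (1u << 1)

static unsigned int io_flags(unsigned int io_flag, enum temp_type temp)
{
	unsigned int temp_mask = (1u << NR_TEMP_TYPE) - 1;
	unsigned int fua_flag  = io_flag & temp_mask;
	unsigned int meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
	unsigned int op_flags = 0;

	if ((1u << temp) & meta_flag)
		op_flags |= REQ_META;
	if ((1u << temp) & fua_flag)
		op_flags |= REQ_FUA;
	return op_flags;
}

int main(void)
{
	/* FUA for HOT only (bit 0), META for COLD only (bit 5). */
	unsigned int io_flag = 0x1u | (0x4u << NR_TEMP_TYPE);

	printf("HOT : %#x\n", io_flags(io_flag, HOT));	/* REQ_FUA */
	printf("COLD: %#x\n", io_flags(io_flag, COLD));	/* REQ_META */
	return 0;
}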
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index cd1e65bcf0b0..8c570de21ed5 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -154,7 +154,6 @@ struct f2fs_mount_info {
int s_jquota_fmt; /* Format of quota to use */
#endif
/* For which write hints are passed down to block layer */
- int whint_mode;
int alloc_mode; /* segment allocation policy */
int fsync_mode; /* fsync policy */
int fs_mode; /* fs mode: LFS or ADAPTIVE */
@@ -1334,12 +1333,6 @@ enum {
};
enum {
- WHINT_MODE_OFF, /* not pass down write hints */
- WHINT_MODE_USER, /* try to pass down hints given by users */
- WHINT_MODE_FS, /* pass down hints with F2FS policy */
-};
-
-enum {
ALLOC_MODE_DEFAULT, /* stay default */
ALLOC_MODE_REUSE, /* reuse segments as much as possible */
};
@@ -3657,8 +3650,6 @@ void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init f2fs_create_segment_manager_caches(void);
void f2fs_destroy_segment_manager_caches(void);
int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
-enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
- enum page_type type, enum temp_type temp);
unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
unsigned int segno);
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 71f232dcf3c2..83639238a1fe 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -550,7 +550,8 @@ make_now:
}
f2fs_set_inode_flags(inode);
- if (file_should_truncate(inode)) {
+ if (file_should_truncate(inode) &&
+ !is_sbi_flag_set(sbi, SBI_POR_DOING)) {
ret = f2fs_truncate(inode);
if (ret)
goto bad_inode;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 22dfeb991529..bd9731cdec56 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -3243,101 +3243,6 @@ int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
}
}
-/* This returns write hints for each segment type. These hints will be
- * passed down to the block layer. There are mapping tables which depend on
- * the mount option 'whint_mode'.
- *
- * 1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
- *
- * 2) whint_mode=user-based. F2FS tries to pass down hints given by users.
- *
- * User F2FS Block
- * ---- ---- -----
- * META WRITE_LIFE_NOT_SET
- * HOT_NODE "
- * WARM_NODE "
- * COLD_NODE "
- * ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
- * extension list " "
- *
- * -- buffered io
- * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
- * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
- * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
- * WRITE_LIFE_NONE " "
- * WRITE_LIFE_MEDIUM " "
- * WRITE_LIFE_LONG " "
- *
- * -- direct io
- * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
- * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
- * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
- * WRITE_LIFE_NONE " WRITE_LIFE_NONE
- * WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
- * WRITE_LIFE_LONG " WRITE_LIFE_LONG
- *
- * 3) whint_mode=fs-based. F2FS passes down hints with its policy.
- *
- * User F2FS Block
- * ---- ---- -----
- * META WRITE_LIFE_MEDIUM;
- * HOT_NODE WRITE_LIFE_NOT_SET
- * WARM_NODE "
- * COLD_NODE WRITE_LIFE_NONE
- * ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
- * extension list " "
- *
- * -- buffered io
- * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
- * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
- * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_LONG
- * WRITE_LIFE_NONE " "
- * WRITE_LIFE_MEDIUM " "
- * WRITE_LIFE_LONG " "
- *
- * -- direct io
- * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
- * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
- * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
- * WRITE_LIFE_NONE " WRITE_LIFE_NONE
- * WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
- * WRITE_LIFE_LONG " WRITE_LIFE_LONG
- */
-
-enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
- enum page_type type, enum temp_type temp)
-{
- if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
- if (type == DATA) {
- if (temp == WARM)
- return WRITE_LIFE_NOT_SET;
- else if (temp == HOT)
- return WRITE_LIFE_SHORT;
- else if (temp == COLD)
- return WRITE_LIFE_EXTREME;
- } else {
- return WRITE_LIFE_NOT_SET;
- }
- } else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) {
- if (type == DATA) {
- if (temp == WARM)
- return WRITE_LIFE_LONG;
- else if (temp == HOT)
- return WRITE_LIFE_SHORT;
- else if (temp == COLD)
- return WRITE_LIFE_EXTREME;
- } else if (type == NODE) {
- if (temp == WARM || temp == HOT)
- return WRITE_LIFE_NOT_SET;
- else if (temp == COLD)
- return WRITE_LIFE_NONE;
- } else if (type == META) {
- return WRITE_LIFE_MEDIUM;
- }
- }
- return WRITE_LIFE_NOT_SET;
-}
-
static int __get_segment_type_2(struct f2fs_io_info *fio)
{
if (fio->type == DATA)
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index ea939db18f88..4368f90571bd 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -138,7 +138,6 @@ enum {
Opt_jqfmt_vfsold,
Opt_jqfmt_vfsv0,
Opt_jqfmt_vfsv1,
- Opt_whint,
Opt_alloc,
Opt_fsync,
Opt_test_dummy_encryption,
@@ -214,7 +213,6 @@ static match_table_t f2fs_tokens = {
{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
- {Opt_whint, "whint_mode=%s"},
{Opt_alloc, "alloc_mode=%s"},
{Opt_fsync, "fsync_mode=%s"},
{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
@@ -975,22 +973,6 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
f2fs_info(sbi, "quota operations not supported");
break;
#endif
- case Opt_whint:
- name = match_strdup(&args[0]);
- if (!name)
- return -ENOMEM;
- if (!strcmp(name, "user-based")) {
- F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
- } else if (!strcmp(name, "off")) {
- F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
- } else if (!strcmp(name, "fs-based")) {
- F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
- } else {
- kfree(name);
- return -EINVAL;
- }
- kfree(name);
- break;
case Opt_alloc:
name = match_strdup(&args[0]);
if (!name)
@@ -1328,12 +1310,6 @@ default_check:
return -EINVAL;
}
- /* Do not pass down write hints if the number of active logs is less
- * than NR_CURSEG_PERSIST_TYPE.
- */
- if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_PERSIST_TYPE)
- F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
-
if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
f2fs_err(sbi, "Allow to mount readonly mode only");
return -EROFS;
@@ -1978,10 +1954,6 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",prjquota");
#endif
f2fs_show_quota_options(seq, sbi->sb);
- if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
- seq_printf(seq, ",whint_mode=%s", "user-based");
- else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
- seq_printf(seq, ",whint_mode=%s", "fs-based");
fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);
@@ -2033,7 +2005,6 @@ static void default_options(struct f2fs_sb_info *sbi)
F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;
F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
- F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
@@ -2314,8 +2285,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
need_stop_gc = true;
}
- if (*flags & SB_RDONLY ||
- F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
+ if (*flags & SB_RDONLY) {
sync_inodes_sb(sb);
set_sbi_flag(sbi, SBI_IS_DIRTY);
diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig
index 76316c4a3fb7..b313a978ae0a 100644
--- a/fs/fscache/Kconfig
+++ b/fs/fscache/Kconfig
@@ -38,6 +38,3 @@ config FSCACHE_DEBUG
enabled by setting bits in /sys/modules/fscache/parameter/debug.
See Documentation/filesystems/caching/fscache.rst for more information.
-
-config FSCACHE_OLD_API
- bool
diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
index 2749933852a9..d645f8b302a2 100644
--- a/fs/fscache/cache.c
+++ b/fs/fscache/cache.c
@@ -214,7 +214,7 @@ void fscache_relinquish_cache(struct fscache_cache *cache)
cache->ops = NULL;
cache->cache_priv = NULL;
- smp_store_release(&cache->state, FSCACHE_CACHE_IS_NOT_PRESENT);
+ fscache_set_cache_state(cache, FSCACHE_CACHE_IS_NOT_PRESENT);
fscache_put_cache(cache, where);
}
EXPORT_SYMBOL(fscache_relinquish_cache);
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 9bb1ab5fe5ed..9d3cf0111709 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -30,7 +30,7 @@ static DEFINE_SPINLOCK(fscache_cookie_lru_lock);
DEFINE_TIMER(fscache_cookie_lru_timer, fscache_cookie_lru_timed_out);
static DECLARE_WORK(fscache_cookie_lru_work, fscache_cookie_lru_worker);
static const char fscache_cookie_states[FSCACHE_COOKIE_STATE__NR] = "-LCAIFUWRD";
-unsigned int fscache_lru_cookie_timeout = 10 * HZ;
+static unsigned int fscache_lru_cookie_timeout = 10 * HZ;
void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
{
@@ -1069,6 +1069,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
}
EXPORT_SYMBOL(__fscache_invalidate);
+#ifdef CONFIG_PROC_FS
/*
* Generate a list of extant cookies in /proc/fs/fscache/cookies
*/
@@ -1145,3 +1146,4 @@ const struct seq_operations fscache_cookies_seq_ops = {
.stop = fscache_cookies_seq_stop,
.show = fscache_cookies_seq_show,
};
+#endif
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index ed1c9ed737f2..1336f517e9b1 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -56,7 +56,9 @@ static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
* cookie.c
*/
extern struct kmem_cache *fscache_cookie_jar;
+#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_cookies_seq_ops;
+#endif
extern struct timer_list fscache_cookie_lru_timer;
extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
@@ -137,7 +139,9 @@ int fscache_stats_show(struct seq_file *m, void *v);
/*
* volume.c
*/
+#ifdef CONFIG_PROC_FS
extern const struct seq_operations fscache_volumes_seq_ops;
+#endif
struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
enum fscache_volume_trace where);
diff --git a/fs/fscache/io.c b/fs/fscache/io.c
index c8c7fe9e9a6e..3af3b08a9bb3 100644
--- a/fs/fscache/io.c
+++ b/fs/fscache/io.c
@@ -235,8 +235,7 @@ static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
{
struct fscache_write_request *wreq = priv;
- fscache_clear_page_bits(fscache_cres_cookie(&wreq->cache_resources),
- wreq->mapping, wreq->start, wreq->len,
+ fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
wreq->set_bits);
if (wreq->term_func)
@@ -296,7 +295,7 @@ abandon_end:
abandon_free:
kfree(wreq);
abandon:
- fscache_clear_page_bits(cookie, mapping, start, len, cond);
+ fscache_clear_page_bits(mapping, start, len, cond);
if (term_func)
term_func(term_func_priv, ret, false);
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 22b41acfbbc3..48f01323c37c 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -899,10 +899,10 @@ retry:
ret = gfs2_glock_nq(gh);
if (ret)
goto out_uninit;
-retry_under_glock:
/* Silently fall back to buffered I/O when writing beyond EOF */
if (iocb->ki_pos + iov_iter_count(from) > i_size_read(&ip->i_inode))
goto out;
+retry_under_glock:
from->nofault = true;
ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
@@ -991,8 +991,6 @@ retry_under_glock:
if (leftover != window_size) {
if (gfs2_holder_queued(&gh))
goto retry_under_glock;
- if (written)
- goto out_uninit;
goto retry;
}
}
@@ -1069,8 +1067,6 @@ retry_under_glock:
from->count = min(from->count, window_size - leftover);
if (gfs2_holder_queued(gh))
goto retry_under_glock;
- if (read && !(iocb->ki_flags & IOCB_DIRECT))
- goto out_uninit;
goto retry;
}
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 99c7477cee5c..dd3a088db11d 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -206,7 +206,7 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
info.flags = 0;
info.length = len;
info.low_limit = current->mm->mmap_base;
- info.high_limit = TASK_SIZE;
+ info.high_limit = arch_get_mmap_end(addr);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
return vm_unmapped_area(&info);
@@ -222,7 +222,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
- info.high_limit = current->mm->mmap_base;
+ info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
addr = vm_unmapped_area(&info);
@@ -237,7 +237,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = current->mm->mmap_base;
- info.high_limit = TASK_SIZE;
+ info.high_limit = arch_get_mmap_end(addr);
addr = vm_unmapped_area(&info);
}
@@ -251,6 +251,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct hstate *h = hstate_file(file);
+ const unsigned long mmap_end = arch_get_mmap_end(addr);
if (len & ~huge_page_mask(h))
return -EINVAL;
@@ -266,7 +267,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
+ if (mmap_end - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
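Replacing TASK_SIZE with arch_get_mmap_end(addr) lets an architecture pick the mmap search ceiling per request, e.g. arm64 only opening its extended VA space when the hint address asks for it. A toy model of that selection (the window values are made up):

#include <stdio.h>

/* Illustrative stand-in for arch_get_mmap_end(addr): pick the mmap search
 * ceiling per request instead of a compile-time constant. */
#define DEFAULT_MAP_WINDOW (1ULL << 47)
#define FULL_VA_SPACE      (1ULL << 52)

static unsigned long long get_mmap_end(unsigned long long addr_hint)
{
	return addr_hint > DEFAULT_MAP_WINDOW ? FULL_VA_SPACE
					      : DEFAULT_MAP_WINDOW;
}

int main(void)
{
	printf("no hint  : %#llx\n", get_mmap_end(0));
	printf("high hint: %#llx\n", get_mmap_end(1ULL << 50));
	return 0;
}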
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 04d374e65e54..dbecd27656c7 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -155,7 +155,6 @@ struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack)
struct io_wq_work {
struct io_wq_work_node list;
unsigned flags;
- int fd;
};
static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 659f8ecba5b7..e01f595f5b7d 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -907,7 +907,11 @@ struct io_kiocb {
u64 user_data;
u32 result;
- u32 cflags;
+ /* fd initially, then cflags for completion */
+ union {
+ u32 cflags;
+ int fd;
+ };
struct io_ring_ctx *ctx;
struct task_struct *task;
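The io_kiocb unions overlay fields whose lifetimes never overlap: fd is only meaningful before the file is assigned, cflags only at completion, so sharing storage shrinks the request without changing behavior. A minimal illustration of a lifetime-based overlay (toy struct, not the kernel layout):

#include <assert.h>
#include <stdio.h>

struct request {
	union {
		int fd;			/* valid before file assignment */
		unsigned int cflags;	/* valid once the request completes */
	};
	int file_assigned;
};

int main(void)
{
	struct request req = { .fd = 7 };

	/* Prep phase: fd is live. */
	assert(!req.file_assigned && req.fd == 7);
	req.file_assigned = 1;

	/* Completion phase: the same storage is reused for cflags. */
	req.cflags = 0x2u;
	printf("cflags=%#x\n", req.cflags);
	return 0;
}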
@@ -916,8 +920,12 @@ struct io_kiocb {
/* store used ubuf, so we can prevent reloading */
struct io_mapped_ubuf *imu;
- /* used by request caches, completion batching and iopoll */
- struct io_wq_work_node comp_list;
+ union {
+ /* used by request caches, completion batching and iopoll */
+ struct io_wq_work_node comp_list;
+ /* cache ->apoll->events */
+ int apoll_events;
+ };
atomic_t refs;
atomic_t poll_refs;
struct io_task_work io_task_work;
@@ -2789,11 +2797,10 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
/* order with io_complete_rw_iopoll(), e.g. ->result updates */
if (!smp_load_acquire(&req->iopoll_completed))
break;
+ nr_events++;
if (unlikely(req->flags & REQ_F_CQE_SKIP))
continue;
-
__io_fill_cqe_req(req, req->result, io_put_kbuf(req, 0));
- nr_events++;
}
if (unlikely(!nr_events))
@@ -3183,19 +3190,18 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
struct kiocb *kiocb = &req->rw.kiocb;
- bool is_stream = req->file->f_mode & FMODE_STREAM;
- if (kiocb->ki_pos == -1) {
- if (!is_stream) {
- req->flags |= REQ_F_CUR_POS;
- kiocb->ki_pos = req->file->f_pos;
- return &kiocb->ki_pos;
- } else {
- kiocb->ki_pos = 0;
- return NULL;
- }
+ if (kiocb->ki_pos != -1)
+ return &kiocb->ki_pos;
+
+ if (!(req->file->f_mode & FMODE_STREAM)) {
+ req->flags |= REQ_F_CUR_POS;
+ kiocb->ki_pos = req->file->f_pos;
+ return &kiocb->ki_pos;
}
- return is_stream ? NULL : &kiocb->ki_pos;
+
+ kiocb->ki_pos = 0;
+ return NULL;
}
static void kiocb_done(struct io_kiocb *req, ssize_t ret,
@@ -3777,6 +3783,7 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
return -EOPNOTSUPP;
+ kiocb->private = NULL;
kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
kiocb->ki_complete = io_complete_rw_iopoll;
req->iopoll_completed = 0;
@@ -3825,8 +3832,10 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
iovec = NULL;
}
ret = io_rw_init_file(req, FMODE_READ);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ kfree(iovec);
return ret;
+ }
req->result = iov_iter_count(&s->iter);
if (force_nonblock) {
@@ -3951,8 +3960,10 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
iovec = NULL;
}
ret = io_rw_init_file(req, FMODE_WRITE);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ kfree(iovec);
return ret;
+ }
req->result = iov_iter_count(&s->iter);
if (force_nonblock) {
@@ -4351,7 +4362,7 @@ static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
return -EAGAIN;
if (sp->flags & SPLICE_F_FD_IN_FIXED)
- in = io_file_get_fixed(req, sp->splice_fd_in, IO_URING_F_UNLOCKED);
+ in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
else
in = io_file_get_normal(req, sp->splice_fd_in);
if (!in) {
@@ -4393,7 +4404,7 @@ static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
return -EAGAIN;
if (sp->flags & SPLICE_F_FD_IN_FIXED)
- in = io_file_get_fixed(req, sp->splice_fd_in, IO_URING_F_UNLOCKED);
+ in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
else
in = io_file_get_normal(req, sp->splice_fd_in);
if (!in) {
@@ -5197,6 +5208,8 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
+ if (unlikely(sqe->addr2 || sqe->file_index))
+ return -EINVAL;
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
@@ -5408,6 +5421,8 @@ static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
+ if (unlikely(sqe->addr2 || sqe->file_index))
+ return -EINVAL;
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
@@ -5834,7 +5849,6 @@ static void io_poll_remove_entries(struct io_kiocb *req)
static int io_poll_check_events(struct io_kiocb *req, bool locked)
{
struct io_ring_ctx *ctx = req->ctx;
- struct io_poll_iocb *poll = io_poll_get_single(req);
int v;
/* req->task == current here, checking PF_EXITING is safe */
@@ -5851,17 +5865,17 @@ static int io_poll_check_events(struct io_kiocb *req, bool locked)
return -ECANCELED;
if (!req->result) {
- struct poll_table_struct pt = { ._key = req->cflags };
+ struct poll_table_struct pt = { ._key = req->apoll_events };
+ unsigned flags = locked ? 0 : IO_URING_F_UNLOCKED;
- if (unlikely(!io_assign_file(req, IO_URING_F_UNLOCKED)))
- req->result = -EBADF;
- else
- req->result = vfs_poll(req->file, &pt) & req->cflags;
+ if (unlikely(!io_assign_file(req, flags)))
+ return -EBADF;
+ req->result = vfs_poll(req->file, &pt) & req->apoll_events;
}
/* multishot, just fill a CQE and proceed */
- if (req->result && !(req->cflags & EPOLLONESHOT)) {
- __poll_t mask = mangle_poll(req->result & poll->events);
+ if (req->result && !(req->apoll_events & EPOLLONESHOT)) {
+ __poll_t mask = mangle_poll(req->result & req->apoll_events);
bool filled;
spin_lock(&ctx->completion_lock);
@@ -5939,7 +5953,7 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, int events)
* CPU. We want to avoid pulling in req->apoll->events for that
* case.
*/
- req->cflags = events;
+ req->apoll_events = events;
if (req->opcode == IORING_OP_POLL_ADD)
req->io_task_work.func = io_poll_task_func;
else
@@ -6331,7 +6345,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
return -EINVAL;
io_req_set_refcount(req);
- req->cflags = poll->events = io_poll_parse_events(sqe, flags);
+ req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
return 0;
}
@@ -6833,6 +6847,7 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
up.nr = 0;
up.tags = 0;
up.resv = 0;
+ up.resv2 = 0;
io_ring_submit_lock(ctx, needs_lock);
ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
@@ -7088,9 +7103,9 @@ static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
return true;
if (req->flags & REQ_F_FIXED_FILE)
- req->file = io_file_get_fixed(req, req->work.fd, issue_flags);
+ req->file = io_file_get_fixed(req, req->fd, issue_flags);
else
- req->file = io_file_get_normal(req, req->work.fd);
+ req->file = io_file_get_normal(req, req->fd);
if (req->file)
return true;
@@ -7104,13 +7119,14 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
const struct cred *creds = NULL;
int ret;
+ if (unlikely(!io_assign_file(req, issue_flags)))
+ return -EBADF;
+
if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
creds = override_creds(req->creds);
if (!io_op_defs[req->opcode].audit_skip)
audit_uring_entry(req->opcode);
- if (unlikely(!io_assign_file(req, issue_flags)))
- return -EBADF;
switch (req->opcode) {
case IORING_OP_NOP:
@@ -7271,16 +7287,18 @@ static void io_wq_submit_work(struct io_wq_work *work)
if (timeout)
io_queue_linked_timeout(timeout);
- if (!io_assign_file(req, issue_flags)) {
- err = -EBADF;
- work->flags |= IO_WQ_WORK_CANCEL;
- }
/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
if (work->flags & IO_WQ_WORK_CANCEL) {
+fail:
io_req_task_queue_fail(req, err);
return;
}
+ if (!io_assign_file(req, issue_flags)) {
+ err = -EBADF;
+ work->flags |= IO_WQ_WORK_CANCEL;
+ goto fail;
+ }
if (req->flags & REQ_F_FORCE_ASYNC) {
bool opcode_poll = def->pollin || def->pollout;
@@ -7628,7 +7646,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (io_op_defs[opcode].needs_file) {
struct io_submit_state *state = &ctx->submit_state;
- req->work.fd = READ_ONCE(sqe->fd);
+ req->fd = READ_ONCE(sqe->fd);
/*
* Plug now if we have more than 2 IO left after this, and the
@@ -10524,6 +10542,11 @@ static int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
break;
}
+ if (reg.resv) {
+ ret = -EINVAL;
+ break;
+ }
+
if (reg.offset == -1U) {
start = 0;
end = IO_RINGFD_REG_MAX;
@@ -10570,7 +10593,7 @@ static int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
ret = -EFAULT;
break;
}
- if (reg.offset >= IO_RINGFD_REG_MAX) {
+ if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
ret = -EINVAL;
break;
}
@@ -10697,6 +10720,8 @@ static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz
return -EINVAL;
if (copy_from_user(&arg, argp, sizeof(arg)))
return -EFAULT;
+ if (arg.pad)
+ return -EINVAL;
*sig = u64_to_user_ptr(arg.sigmask);
*argsz = arg.sigmask_sz;
*ts = u64_to_user_ptr(arg.ts);
@@ -11178,7 +11203,8 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
- IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP;
+ IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
+ IORING_FEAT_LINKED_FILE;
if (copy_to_user(params, p, sizeof(*p))) {
ret = -EFAULT;
@@ -11389,8 +11415,6 @@ static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
__u32 tmp;
int err;
- if (up->resv)
- return -EINVAL;
if (check_add_overflow(up->offset, nr_args, &tmp))
return -EOVERFLOW;
err = io_rsrc_node_switch_start(ctx);
@@ -11416,6 +11440,8 @@ static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
memset(&up, 0, sizeof(up));
if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
return -EFAULT;
+ if (up.resv || up.resv2)
+ return -EINVAL;
return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}
@@ -11428,7 +11454,7 @@ static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
return -EINVAL;
if (copy_from_user(&up, arg, sizeof(up)))
return -EFAULT;
- if (!up.nr || up.resv)
+ if (!up.nr || up.resv || up.resv2)
return -EINVAL;
return __io_register_rsrc_update(ctx, type, &up, up.nr);
}
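Several of the io_uring hunks above add checks that reserved fields in user-supplied structs (resv, resv2, pad) are zero. Rejecting nonzero reserved bits today is what allows a later ABI revision to give them meaning. A sketch of the pattern with a toy struct:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct rsrc_update {			/* toy version of a uapi struct */
	unsigned int offset;
	unsigned int resv;
	unsigned long long data;
	unsigned int nr;
	unsigned int resv2;
};

static int register_update(const struct rsrc_update *up)
{
	/* A future ABI may use resv/resv2; require zero today. */
	if (!up->nr || up->resv || up->resv2)
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct rsrc_update up;

	memset(&up, 0, sizeof(up));
	up.nr = 1;
	printf("%d\n", register_update(&up));	/* 0 */
	up.resv2 = 1;
	printf("%d\n", register_update(&up));	/* -EINVAL */
	return 0;
}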
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 5b9408e3b370..ac7f067b7bdd 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -488,7 +488,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
jbd2_journal_wait_updates(journal);
commit_transaction->t_state = T_SWITCH;
- write_unlock(&journal->j_state_lock);
J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
journal->j_max_transaction_buffers);
@@ -508,6 +507,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
* has reserved. This is consistent with the existing behaviour
* that multiple jbd2_journal_get_write_access() calls to the same
* buffer are perfectly permissible.
+ * We use journal->j_state_lock here to serialize processing of
+ * t_reserved_list with eviction of buffers from journal_unmap_buffer().
*/
while (commit_transaction->t_reserved_list) {
jh = commit_transaction->t_reserved_list;
@@ -527,6 +528,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
jbd2_journal_refile_buffer(journal, jh);
}
+ write_unlock(&journal->j_state_lock);
/*
* Now try to drop any written-back buffers from the journal's
* checkpoint lists. We do this *before* commit because it potentially
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 61a8edc4ba8b..e205fde7163a 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -1406,7 +1406,12 @@ static void __kernfs_remove(struct kernfs_node *kn)
*/
void kernfs_remove(struct kernfs_node *kn)
{
- struct kernfs_root *root = kernfs_root(kn);
+ struct kernfs_root *root;
+
+ if (!kn)
+ return;
+
+ root = kernfs_root(kn);
down_write(&root->kernfs_rwsem);
__kernfs_remove(kn);
diff --git a/fs/ksmbd/misc.c b/fs/ksmbd/misc.c
index 60e7ac62c917..1e2076a53bed 100644
--- a/fs/ksmbd/misc.c
+++ b/fs/ksmbd/misc.c
@@ -158,19 +158,41 @@ out:
* Return: Windows path string or error
*/
-char *convert_to_nt_pathname(char *filename)
+char *convert_to_nt_pathname(struct ksmbd_share_config *share,
+ struct path *path)
{
- char *ab_pathname;
+ char *pathname, *ab_pathname, *nt_pathname;
+ int share_path_len = share->path_sz;
- if (strlen(filename) == 0)
- filename = "\\";
+ pathname = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!pathname)
+ return ERR_PTR(-EACCES);
- ab_pathname = kstrdup(filename, GFP_KERNEL);
- if (!ab_pathname)
- return NULL;
+ ab_pathname = d_path(path, pathname, PATH_MAX);
+ if (IS_ERR(ab_pathname)) {
+ nt_pathname = ERR_PTR(-EACCES);
+ goto free_pathname;
+ }
+
+ if (strncmp(ab_pathname, share->path, share_path_len)) {
+ nt_pathname = ERR_PTR(-EACCES);
+ goto free_pathname;
+ }
+
+ nt_pathname = kzalloc(strlen(&ab_pathname[share_path_len]) + 2, GFP_KERNEL);
+ if (!nt_pathname) {
+ nt_pathname = ERR_PTR(-ENOMEM);
+ goto free_pathname;
+ }
+ if (ab_pathname[share_path_len] == '\0')
+ strcpy(nt_pathname, "/");
+ strcat(nt_pathname, &ab_pathname[share_path_len]);
+
+ ksmbd_conv_path_to_windows(nt_pathname);
- ksmbd_conv_path_to_windows(ab_pathname);
- return ab_pathname;
+free_pathname:
+ kfree(pathname);
+ return nt_pathname;
}
int get_nlink(struct kstat *st)
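convert_to_nt_pathname() now derives the name from the open file via d_path() instead of a cached fp->filename, verifies the result lies under the share root, and converts it to a share-relative Windows path; note d_path() writes into the tail of the supplied buffer, so only the original buffer may be freed. A userspace analogue of the share-relative conversion step (the helper below is hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical analogue: given an absolute path and the share root, build
 * a Windows-style path relative to the share, as the ksmbd change does. */
static char *to_nt_pathname(const char *abs_path, const char *share_root)
{
	size_t root_len = strlen(share_root);
	const char *rel;
	char *nt;
	size_t i;

	if (strncmp(abs_path, share_root, root_len))
		return NULL;			/* outside the share: -EACCES */

	rel = abs_path + root_len;
	nt = calloc(strlen(rel) + 2, 1);
	if (!nt)
		return NULL;
	if (*rel == '\0')
		strcpy(nt, "/");		/* the share root itself */
	strcat(nt, rel);
	for (i = 0; nt[i]; i++)			/* ksmbd_conv_path_to_windows() */
		if (nt[i] == '/')
			nt[i] = '\\';
	return nt;
}

int main(void)
{
	char *p = to_nt_pathname("/srv/share/dir/file.txt", "/srv/share");

	printf("%s\n", p ? p : "(denied)");	/* \dir\file.txt */
	free(p);
	return 0;
}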
diff --git a/fs/ksmbd/misc.h b/fs/ksmbd/misc.h
index 253366bd0951..aae2a252945f 100644
--- a/fs/ksmbd/misc.h
+++ b/fs/ksmbd/misc.h
@@ -14,7 +14,8 @@ struct ksmbd_file;
int match_pattern(const char *str, size_t len, const char *pattern);
int ksmbd_validate_filename(char *filename);
int parse_stream_name(char *filename, char **stream_name, int *s_type);
-char *convert_to_nt_pathname(char *filename);
+char *convert_to_nt_pathname(struct ksmbd_share_config *share,
+ struct path *path);
int get_nlink(struct kstat *st);
void ksmbd_conv_path_to_unix(char *path);
void ksmbd_strip_last_slash(char *path);
diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c
index 23871b18a429..8b5560574d4c 100644
--- a/fs/ksmbd/oplock.c
+++ b/fs/ksmbd/oplock.c
@@ -1694,33 +1694,3 @@ out:
read_unlock(&lease_list_lock);
return ret_op;
}
-
-int smb2_check_durable_oplock(struct ksmbd_file *fp,
- struct lease_ctx_info *lctx, char *name)
-{
- struct oplock_info *opinfo = opinfo_get(fp);
- int ret = 0;
-
- if (opinfo && opinfo->is_lease) {
- if (!lctx) {
- pr_err("open does not include lease\n");
- ret = -EBADF;
- goto out;
- }
- if (memcmp(opinfo->o_lease->lease_key, lctx->lease_key,
- SMB2_LEASE_KEY_SIZE)) {
- pr_err("invalid lease key\n");
- ret = -EBADF;
- goto out;
- }
- if (name && strcmp(fp->filename, name)) {
- pr_err("invalid name reconnect %s\n", name);
- ret = -EINVAL;
- goto out;
- }
- }
-out:
- if (opinfo)
- opinfo_put(opinfo);
- return ret;
-}
diff --git a/fs/ksmbd/oplock.h b/fs/ksmbd/oplock.h
index 0cf7a2b5bbc0..09753448f779 100644
--- a/fs/ksmbd/oplock.h
+++ b/fs/ksmbd/oplock.h
@@ -124,6 +124,4 @@ struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn,
int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
struct lease_ctx_info *lctx);
void destroy_lease_table(struct ksmbd_conn *conn);
-int smb2_check_durable_oplock(struct ksmbd_file *fp,
- struct lease_ctx_info *lctx, char *name);
#endif /* __KSMBD_OPLOCK_H */
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 3bf6c56c654c..16c803a9d996 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -11,6 +11,7 @@
#include <linux/statfs.h>
#include <linux/ethtool.h>
#include <linux/falloc.h>
+#include <linux/mount.h>
#include "glob.h"
#include "smbfsctl.h"
@@ -2918,7 +2919,6 @@ int smb2_open(struct ksmbd_work *work)
goto err_out;
}
- fp->filename = name;
fp->cdoption = req->CreateDisposition;
fp->daccess = daccess;
fp->saccess = req->ShareAccess;
@@ -3270,14 +3270,13 @@ err_out1:
if (!rsp->hdr.Status)
rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
- if (!fp || !fp->filename)
- kfree(name);
if (fp)
ksmbd_fd_put(work, fp);
smb2_set_err_rsp(work);
ksmbd_debug(SMB, "Error response: %x\n", rsp->hdr.Status);
}
+ kfree(name);
kfree(lc);
return 0;
@@ -3895,8 +3894,6 @@ int smb2_query_dir(struct ksmbd_work *work)
ksmbd_debug(SMB, "Search pattern is %s\n", srch_ptr);
}
- ksmbd_debug(SMB, "Directory name is %s\n", dir_fp->filename);
-
if (srch_flag & SMB2_REOPEN || srch_flag & SMB2_RESTART_SCANS) {
ksmbd_debug(SMB, "Restart directory scan\n");
generic_file_llseek(dir_fp->filp, 0, SEEK_SET);
@@ -4390,9 +4387,9 @@ static int get_file_all_info(struct ksmbd_work *work,
return -EACCES;
}
- filename = convert_to_nt_pathname(fp->filename);
- if (!filename)
- return -ENOMEM;
+ filename = convert_to_nt_pathname(work->tcon->share_conf, &fp->filp->f_path);
+ if (IS_ERR(filename))
+ return PTR_ERR(filename);
inode = file_inode(fp->filp);
generic_fillattr(file_mnt_user_ns(fp->filp), inode, &stat);
@@ -4999,15 +4996,17 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
case FS_SECTOR_SIZE_INFORMATION:
{
struct smb3_fs_ss_info *info;
+ unsigned int sector_size =
+ min_t(unsigned int, path.mnt->mnt_sb->s_blocksize, 4096);
info = (struct smb3_fs_ss_info *)(rsp->Buffer);
- info->LogicalBytesPerSector = cpu_to_le32(stfs.f_bsize);
+ info->LogicalBytesPerSector = cpu_to_le32(sector_size);
info->PhysicalBytesPerSectorForAtomicity =
- cpu_to_le32(stfs.f_bsize);
- info->PhysicalBytesPerSectorForPerf = cpu_to_le32(stfs.f_bsize);
+ cpu_to_le32(sector_size);
+ info->PhysicalBytesPerSectorForPerf = cpu_to_le32(sector_size);
info->FSEffPhysicalBytesPerSectorForAtomicity =
- cpu_to_le32(stfs.f_bsize);
+ cpu_to_le32(sector_size);
info->Flags = cpu_to_le32(SSINFO_FLAGS_ALIGNED_DEVICE |
SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE);
info->ByteOffsetForSectorAlignment = 0;
@@ -5683,8 +5682,7 @@ static int set_file_allocation_info(struct ksmbd_work *work,
size = i_size_read(inode);
rc = ksmbd_vfs_truncate(work, fp, alloc_blks * 512);
if (rc) {
- pr_err("truncate failed! filename : %s, err %d\n",
- fp->filename, rc);
+ pr_err("truncate failed!, err %d\n", rc);
return rc;
}
if (size < alloc_blks * 512)
@@ -5714,12 +5712,10 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
* truncated range.
*/
if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC) {
- ksmbd_debug(SMB, "filename : %s truncated to newsize %lld\n",
- fp->filename, newsize);
+ ksmbd_debug(SMB, "truncated to newsize %lld\n", newsize);
rc = ksmbd_vfs_truncate(work, fp, newsize);
if (rc) {
- ksmbd_debug(SMB, "truncate failed! filename : %s err %d\n",
- fp->filename, rc);
+ ksmbd_debug(SMB, "truncate failed!, err %d\n", rc);
if (rc != -EAGAIN)
rc = -EBADF;
return rc;
@@ -5765,8 +5761,10 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
if (parent_fp) {
if (parent_fp->daccess & FILE_DELETE_LE) {
pr_err("parent dir is opened with delete access\n");
+ ksmbd_fd_put(work, parent_fp);
return -ESHARE;
}
+ ksmbd_fd_put(work, parent_fp);
}
next:
return smb2_rename(work, fp, user_ns, rename_info,
diff --git a/fs/ksmbd/vfs.c b/fs/ksmbd/vfs.c
index 9cebb6ba555b..dcdd07c6efff 100644
--- a/fs/ksmbd/vfs.c
+++ b/fs/ksmbd/vfs.c
@@ -398,8 +398,7 @@ int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp, size_t count,
nbytes = kernel_read(filp, rbuf, count, pos);
if (nbytes < 0) {
- pr_err("smb read failed for (%s), err = %zd\n",
- fp->filename, nbytes);
+ pr_err("smb read failed, err = %zd\n", nbytes);
return nbytes;
}
@@ -875,8 +874,7 @@ int ksmbd_vfs_truncate(struct ksmbd_work *work,
err = vfs_truncate(&filp->f_path, size);
if (err)
- pr_err("truncate failed for filename : %s err %d\n",
- fp->filename, err);
+ pr_err("truncate failed, err %d\n", err);
return err;
}
diff --git a/fs/ksmbd/vfs_cache.c b/fs/ksmbd/vfs_cache.c
index 29c1db66bd0f..c4d59d2735f0 100644
--- a/fs/ksmbd/vfs_cache.c
+++ b/fs/ksmbd/vfs_cache.c
@@ -328,7 +328,6 @@ static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
kfree(smb_lock);
}
- kfree(fp->filename);
if (ksmbd_stream_fd(fp))
kfree(fp->stream.name);
kmem_cache_free(filp_cache, fp);
@@ -497,6 +496,7 @@ struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode)
list_for_each_entry(lfp, &ci->m_fp_list, node) {
if (inode == file_inode(lfp->filp)) {
atomic_dec(&ci->m_count);
+ lfp = ksmbd_fp_get(lfp);
read_unlock(&ci->m_lock);
return lfp;
}
diff --git a/fs/ksmbd/vfs_cache.h b/fs/ksmbd/vfs_cache.h
index 36239ce31afd..fcb13413fa8d 100644
--- a/fs/ksmbd/vfs_cache.h
+++ b/fs/ksmbd/vfs_cache.h
@@ -62,7 +62,6 @@ struct ksmbd_inode {
struct ksmbd_file {
struct file *filp;
- char *filename;
u64 persistent_id;
u64 volatile_id;
diff --git a/fs/namei.c b/fs/namei.c
index 3f1829b3ab5b..509657fdf4f5 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -3673,18 +3673,14 @@ static struct dentry *filename_create(int dfd, struct filename *name,
{
struct dentry *dentry = ERR_PTR(-EEXIST);
struct qstr last;
+ bool want_dir = lookup_flags & LOOKUP_DIRECTORY;
+ unsigned int reval_flag = lookup_flags & LOOKUP_REVAL;
+ unsigned int create_flags = LOOKUP_CREATE | LOOKUP_EXCL;
int type;
int err2;
int error;
- bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);
- /*
- * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
- * other flags passed in are ignored!
- */
- lookup_flags &= LOOKUP_REVAL;
-
- error = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
+ error = filename_parentat(dfd, name, reval_flag, path, &last, &type);
if (error)
return ERR_PTR(error);
@@ -3698,11 +3694,13 @@ static struct dentry *filename_create(int dfd, struct filename *name,
/* don't fail immediately if it's r/o, at least try to report other errors */
err2 = mnt_want_write(path->mnt);
/*
- * Do the final lookup.
+ * Do the final lookup. Suppress 'create' if there is a trailing
+ * '/', and a directory wasn't requested.
*/
- lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
+ if (last.name[last.len] && !want_dir)
+ create_flags = 0;
inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
- dentry = __lookup_hash(&last, path->dentry, lookup_flags);
+ dentry = __lookup_hash(&last, path->dentry, reval_flag | create_flags);
if (IS_ERR(dentry))
goto unlock;
@@ -3716,7 +3714,7 @@ static struct dentry *filename_create(int dfd, struct filename *name,
* all is fine. Let's be bastards - you had / on the end, you've
* been asking for (non-existent) directory. -ENOENT for you.
*/
- if (unlikely(!is_dir && last.name[last.len])) {
+ if (unlikely(!create_flags)) {
error = -ENOENT;
goto fail;
}
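The filename_create() rework folds the trailing-slash rule into the lookup flags: when the caller did not ask for a directory but the final component is followed by '/', LOOKUP_CREATE | LOOKUP_EXCL are suppressed, so the final lookup cannot instantiate a dentry for a create that is about to fail with -ENOENT anyway. The decision reduces to a small predicate; a sketch with illustrative flag values:

#include <stdio.h>
#include <string.h>

#define LOOKUP_CREATE 0x1u		/* illustrative bit values */
#define LOOKUP_EXCL   0x2u

/* last points at the final path component of length len; a non-NUL byte
 * after it means the original path carried a trailing '/'. */
static unsigned int create_flags(const char *last, size_t len, int want_dir)
{
	if (last[len] != '\0' && !want_dir)
		return 0;	/* lookup will end in -ENOENT, don't create */
	return LOOKUP_CREATE | LOOKUP_EXCL;
}

int main(void)
{
	const char *path = "newfile/";	/* component "newfile", trailing '/' */
	size_t len = strlen("newfile");

	printf("file wanted: %#x\n", create_flags(path, len, 0));	/* 0 */
	printf("dir wanted : %#x\n", create_flags(path, len, 1));	/* 0x3 */
	return 0;
}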
diff --git a/fs/namespace.c b/fs/namespace.c
index a0a36bfa3aa0..afe2b64b14f1 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -4058,10 +4058,22 @@ static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
if (err) {
struct mount *p;
- for (p = mnt; p != m; p = next_mnt(p, mnt)) {
+ /*
+ * If we had to call mnt_hold_writers() MNT_WRITE_HOLD will
+ * be set in @mnt_flags. The loop unsets MNT_WRITE_HOLD for all
+ * mounts and needs to take care to include the first mount.
+ */
+ for (p = mnt; p; p = next_mnt(p, mnt)) {
/* If we had to hold writers unblock them. */
if (p->mnt.mnt_flags & MNT_WRITE_HOLD)
mnt_unhold_writers(p);
+
+ /*
+ * We're done once the first mount we changed got
+ * MNT_WRITE_HOLD unset.
+ */
+ if (p == m)
+ break;
}
}
return err;
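The mount_setattr_prepare() fix is a cleanup-loop boundary bug: iterating with p != m never visits m itself, and when the failure happens on the first mount the loop body never runs at all, leaving its MNT_WRITE_HOLD set. Walking unconditionally and breaking only after handling m covers both ends. A toy list version of the corrected unwind:

#include <stdio.h>

struct mnt {
	int write_hold;
	struct mnt *next;
};

/* Corrected unwind: include the first mount and stop only after handling
 * the mount where the prepare step failed (the kernel's "p == m" break). */
static void unwind(struct mnt *first, struct mnt *failed)
{
	struct mnt *p;

	for (p = first; p; p = p->next) {
		if (p->write_hold) {
			p->write_hold = 0;	/* mnt_unhold_writers() */
			puts("unheld a mount");
		}
		if (p == failed)
			break;
	}
}

int main(void)
{
	struct mnt c = { 1, NULL }, b = { 1, &c }, a = { 1, &b };

	unwind(&a, &b);			/* unholds a and b, leaves c alone */
	printf("c still held: %d\n", c.write_hold);
	return 0;
}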
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index c08882f5867b..2c1b027774d4 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -237,6 +237,13 @@ nfsd_file_check_write_error(struct nfsd_file *nf)
}
static void
+nfsd_file_flush(struct nfsd_file *nf)
+{
+ if (nf->nf_file && vfs_fsync(nf->nf_file, 1) != 0)
+ nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
+}
+
+static void
nfsd_file_do_unhash(struct nfsd_file *nf)
{
lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
@@ -295,19 +302,15 @@ nfsd_file_put_noref(struct nfsd_file *nf)
void
nfsd_file_put(struct nfsd_file *nf)
{
- bool is_hashed;
-
set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
- if (refcount_read(&nf->nf_ref) > 2 || !nf->nf_file) {
+ if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0) {
+ nfsd_file_flush(nf);
nfsd_file_put_noref(nf);
- return;
+ } else {
+ nfsd_file_put_noref(nf);
+ if (nf->nf_file)
+ nfsd_file_schedule_laundrette();
}
-
- filemap_flush(nf->nf_file->f_mapping);
- is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
- nfsd_file_put_noref(nf);
- if (is_hashed)
- nfsd_file_schedule_laundrette();
if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
nfsd_file_gc();
}
@@ -328,6 +331,7 @@ nfsd_file_dispose_list(struct list_head *dispose)
while(!list_empty(dispose)) {
nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
list_del(&nf->nf_lru);
+ nfsd_file_flush(nf);
nfsd_file_put_noref(nf);
}
}
@@ -341,6 +345,7 @@ nfsd_file_dispose_list_sync(struct list_head *dispose)
while(!list_empty(dispose)) {
nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
list_del(&nf->nf_lru);
+ nfsd_file_flush(nf);
if (!refcount_dec_and_test(&nf->nf_ref))
continue;
if (nfsd_file_free(nf))
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index 367551bddfc6..b5760801d377 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -249,34 +249,34 @@ nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
int w;
if (!svcxdr_encode_stat(xdr, resp->status))
- return 0;
+ return false;
if (dentry == NULL || d_really_is_negative(dentry))
- return 1;
+ return true;
inode = d_inode(dentry);
if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat))
- return 0;
+ return false;
if (xdr_stream_encode_u32(xdr, resp->mask) < 0)
- return 0;
+ return false;
rqstp->rq_res.page_len = w = nfsacl_size(
(resp->mask & NFS_ACL) ? resp->acl_access : NULL,
(resp->mask & NFS_DFACL) ? resp->acl_default : NULL);
while (w > 0) {
if (!*(rqstp->rq_next_page++))
- return 1;
+ return true;
w -= PAGE_SIZE;
}
if (!nfs_stream_encode_acl(xdr, inode, resp->acl_access,
resp->mask & NFS_ACL, 0))
- return 0;
+ return false;
if (!nfs_stream_encode_acl(xdr, inode, resp->acl_default,
resp->mask & NFS_DFACL, NFS_ACL_DEFAULT))
- return 0;
+ return false;
- return 1;
+ return true;
}
/* ACCESS */
@@ -286,17 +286,17 @@ nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
struct nfsd3_accessres *resp = rqstp->rq_resp;
if (!svcxdr_encode_stat(xdr, resp->status))
- return 0;
+ return false;
switch (resp->status) {
case nfs_ok:
if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat))
- return 0;
+ return false;
if (xdr_stream_encode_u32(xdr, resp->access) < 0)
- return 0;
+ return false;
break;
}
- return 1;
+ return true;
}
/*
diff --git a/fs/pipe.c b/fs/pipe.c
index 9648ac15164a..e140ea150bbb 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -804,7 +804,7 @@ struct pipe_inode_info *alloc_pipe_info(void)
if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
goto out_revert_acct;
- pipe->bufs = kvcalloc(pipe_bufs, sizeof(struct pipe_buffer),
+ pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
GFP_KERNEL_ACCOUNT);
if (pipe->bufs) {
@@ -849,7 +849,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
#endif
if (pipe->tmp_page)
__free_page(pipe->tmp_page);
- kvfree(pipe->bufs);
+ kfree(pipe->bufs);
kfree(pipe);
}
@@ -1264,7 +1264,8 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
if (nr_slots < n)
return -EBUSY;
- bufs = kvcalloc(nr_slots, sizeof(*bufs), GFP_KERNEL_ACCOUNT);
+ bufs = kcalloc(nr_slots, sizeof(*bufs),
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (unlikely(!bufs))
return -ENOMEM;
@@ -1291,7 +1292,7 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
head = n;
tail = 0;
- kvfree(pipe->bufs);
+ kfree(pipe->bufs);
pipe->bufs = bufs;
pipe->ring_size = nr_slots;
if (pipe->max_usage > nr_slots)
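The pipe allocation switch from kvcalloc() to kcalloc() drops the vmalloc fallback for what is a small, bounded slot array, and __GFP_NOWARN keeps a failed user-requested resize from spamming the log. The calloc-style call also gives overflow-checked size math and zeroed memory; a userspace equivalent:

#include <stdio.h>
#include <stdlib.h>

struct pipe_buffer {			/* layout is illustrative */
	void *page;
	unsigned int offset, len, flags;
};

int main(void)
{
	size_t nr_slots = 16;
	/* calloc-style allocation: the multiplication is overflow-checked
	 * and the array arrives zeroed, matching kcalloc() semantics. */
	struct pipe_buffer *bufs = calloc(nr_slots, sizeof(*bufs));

	if (!bufs)
		return 1;		/* the kernel returns -ENOMEM here */
	printf("allocated %zu bytes\n", nr_slots * sizeof(*bufs));
	free(bufs);
	return 0;
}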
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 80acb6885cf9..962d32468eb4 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -759,9 +759,14 @@ static void posix_acl_fix_xattr_userns(
}
void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
+ struct inode *inode,
void *value, size_t size)
{
struct user_namespace *user_ns = current_user_ns();
+
+ /* Leave ids untouched on non-idmapped mounts. */
+ if (no_idmapping(mnt_userns, i_user_ns(inode)))
+ mnt_userns = &init_user_ns;
if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns))
return;
posix_acl_fix_xattr_userns(&init_user_ns, user_ns, mnt_userns, value,
@@ -769,9 +774,14 @@ void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
}
void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
+ struct inode *inode,
void *value, size_t size)
{
struct user_namespace *user_ns = current_user_ns();
+
+ /* Leave ids untouched on non-idmapped mounts. */
+ if (no_idmapping(mnt_userns, i_user_ns(inode)))
+ mnt_userns = &init_user_ns;
if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns))
return;
posix_acl_fix_xattr_userns(user_ns, &init_user_ns, mnt_userns, value,
diff --git a/fs/stat.c b/fs/stat.c
index 7f734be0e57e..5c2c94464e8b 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -348,9 +348,6 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
# define choose_32_64(a,b) b
#endif
-#define valid_dev(x) choose_32_64(old_valid_dev(x),true)
-#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
-
#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif
@@ -359,7 +356,9 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
struct stat tmp;
- if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
+ if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
+ return -EOVERFLOW;
+ if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
return -EOVERFLOW;
#if BITS_PER_LONG == 32
if (stat->size > MAX_NON_LFS)
@@ -367,7 +366,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
#endif
INIT_STRUCT_STAT_PADDING(tmp);
- tmp.st_dev = encode_dev(stat->dev);
+ tmp.st_dev = new_encode_dev(stat->dev);
tmp.st_ino = stat->ino;
if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
return -EOVERFLOW;
@@ -377,7 +376,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
return -EOVERFLOW;
SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
- tmp.st_rdev = encode_dev(stat->rdev);
+ tmp.st_rdev = new_encode_dev(stat->rdev);
tmp.st_size = stat->size;
tmp.st_atime = stat->atime.tv_sec;
tmp.st_mtime = stat->mtime.tv_sec;
@@ -665,11 +664,13 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
struct compat_stat tmp;
- if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
+ if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
+ return -EOVERFLOW;
+ if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
return -EOVERFLOW;
memset(&tmp, 0, sizeof(tmp));
- tmp.st_dev = old_encode_dev(stat->dev);
+ tmp.st_dev = new_encode_dev(stat->dev);
tmp.st_ino = stat->ino;
if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
return -EOVERFLOW;
@@ -679,7 +680,7 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
return -EOVERFLOW;
SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
- tmp.st_rdev = old_encode_dev(stat->rdev);
+ tmp.st_rdev = new_encode_dev(stat->rdev);
if ((u64) stat->size > MAX_NON_LFS)
return -EOVERFLOW;
tmp.st_size = stat->size;
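cp_new_stat() and cp_compat_stat() now validate device numbers against the width of the destination field and always use new_encode_dev(), which agrees with the old 16-bit encoding whenever the numbers are small enough for it. Userspace copies of the two encodings (mirroring linux/kdev_t.h) make the compatibility visible:

#include <stdio.h>

static unsigned int old_encode_dev(unsigned int major, unsigned int minor)
{
	return (major << 8) | minor;		/* 8-bit major, 8-bit minor */
}

static unsigned int new_encode_dev(unsigned int major, unsigned int minor)
{
	return (minor & 0xffu) | (major << 8) | ((minor & ~0xffu) << 12);
}

int main(void)
{
	/* For small numbers the encodings agree, which is why switching
	 * cp_new_stat() to new_encode_dev() is safe for old binaries. */
	printf("old(8,1)=%#x new(8,1)=%#x\n",
	       old_encode_dev(8, 1), new_encode_dev(8, 1));

	/* Large minors only fit the new encoding. */
	printf("new(259,131072)=%#x\n", new_encode_dev(259, 131072));
	return 0;
}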
diff --git a/fs/xattr.c b/fs/xattr.c
index 5c8c5175b385..998045165916 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -569,7 +569,8 @@ setxattr(struct user_namespace *mnt_userns, struct dentry *d,
}
if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
(strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
- posix_acl_fix_xattr_from_user(mnt_userns, kvalue, size);
+ posix_acl_fix_xattr_from_user(mnt_userns, d_inode(d),
+ kvalue, size);
}
error = vfs_setxattr(mnt_userns, d, kname, kvalue, size, flags);
@@ -667,7 +668,8 @@ getxattr(struct user_namespace *mnt_userns, struct dentry *d,
if (error > 0) {
if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
(strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
- posix_acl_fix_xattr_to_user(mnt_userns, kvalue, error);
+ posix_acl_fix_xattr_to_user(mnt_userns, d_inode(d),
+ kvalue, error);
if (size && copy_to_user(value, kvalue, error))
error = -EFAULT;
} else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index e1afb9e503e1..bf4e60871068 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -406,7 +406,7 @@ xfs_buf_alloc_pages(
STATIC int
_xfs_buf_map_pages(
struct xfs_buf *bp,
- uint flags)
+ xfs_buf_flags_t flags)
{
ASSERT(bp->b_flags & _XBF_PAGES);
if (bp->b_page_count == 1) {
@@ -868,7 +868,7 @@ xfs_buf_read_uncached(
struct xfs_buftarg *target,
xfs_daddr_t daddr,
size_t numblks,
- int flags,
+ xfs_buf_flags_t flags,
struct xfs_buf **bpp,
const struct xfs_buf_ops *ops)
{
@@ -903,7 +903,7 @@ int
xfs_buf_get_uncached(
struct xfs_buftarg *target,
size_t numblks,
- int flags,
+ xfs_buf_flags_t flags,
struct xfs_buf **bpp)
{
int error;
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index edcb6254fa6a..1ee3056ff9cf 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -22,28 +22,28 @@ struct xfs_buf;
#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
-#define XBF_READ (1 << 0) /* buffer intended for reading from device */
-#define XBF_WRITE (1 << 1) /* buffer intended for writing to device */
-#define XBF_READ_AHEAD (1 << 2) /* asynchronous read-ahead */
-#define XBF_NO_IOACCT (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
-#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */
-#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
-#define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */
-#define XBF_WRITE_FAIL (1 << 7) /* async writes have failed on this buffer */
+#define XBF_READ (1u << 0) /* buffer intended for reading from device */
+#define XBF_WRITE (1u << 1) /* buffer intended for writing to device */
+#define XBF_READ_AHEAD (1u << 2) /* asynchronous read-ahead */
+#define XBF_NO_IOACCT (1u << 3) /* bypass I/O accounting (non-LRU bufs) */
+#define XBF_ASYNC (1u << 4) /* initiator will not wait for completion */
+#define XBF_DONE (1u << 5) /* all pages in the buffer uptodate */
+#define XBF_STALE (1u << 6) /* buffer has been staled, do not find it */
+#define XBF_WRITE_FAIL (1u << 7) /* async writes have failed on this buffer */
/* buffer type flags for write callbacks */
-#define _XBF_INODES (1 << 16)/* inode buffer */
-#define _XBF_DQUOTS (1 << 17)/* dquot buffer */
-#define _XBF_LOGRECOVERY (1 << 18)/* log recovery buffer */
+#define _XBF_INODES (1u << 16)/* inode buffer */
+#define _XBF_DQUOTS (1u << 17)/* dquot buffer */
+#define _XBF_LOGRECOVERY (1u << 18)/* log recovery buffer */
/* flags used only internally */
-#define _XBF_PAGES (1 << 20)/* backed by refcounted pages */
-#define _XBF_KMEM (1 << 21)/* backed by heap memory */
-#define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */
+#define _XBF_PAGES (1u << 20)/* backed by refcounted pages */
+#define _XBF_KMEM (1u << 21)/* backed by heap memory */
+#define _XBF_DELWRI_Q (1u << 22)/* buffer on a delwri queue */
/* flags used only as arguments to access routines */
-#define XBF_TRYLOCK (1 << 30)/* lock requested, but do not wait */
-#define XBF_UNMAPPED (1 << 31)/* do not map the buffer */
+#define XBF_TRYLOCK (1u << 30)/* lock requested, but do not wait */
+#define XBF_UNMAPPED (1u << 31)/* do not map the buffer */
typedef unsigned int xfs_buf_flags_t;
@@ -58,7 +58,7 @@ typedef unsigned int xfs_buf_flags_t;
{ XBF_WRITE_FAIL, "WRITE_FAIL" }, \
{ _XBF_INODES, "INODES" }, \
{ _XBF_DQUOTS, "DQUOTS" }, \
- { _XBF_LOGRECOVERY, "LOG_RECOVERY" }, \
+ { _XBF_LOGRECOVERY, "LOG_RECOVERY" }, \
{ _XBF_PAGES, "PAGES" }, \
{ _XBF_KMEM, "KMEM" }, \
{ _XBF_DELWRI_Q, "DELWRI_Q" }, \
@@ -247,11 +247,11 @@ xfs_buf_readahead(
return xfs_buf_readahead_map(target, &map, 1, ops);
}
-int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks, int flags,
- struct xfs_buf **bpp);
+int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
+ xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
- size_t numblks, int flags, struct xfs_buf **bpp,
- const struct xfs_buf_ops *ops);
+ size_t numblks, xfs_buf_flags_t flags, struct xfs_buf **bpp,
+ const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
void xfs_buf_hold(struct xfs_buf *bp);
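
The flag cleanup above is more than cosmetic: XBF_UNMAPPED is bit 31, and 1 << 31 overflows a signed 32-bit int, which is undefined behaviour and, where it happens to "work", yields a negative value that sign-extends on widening. Suffixing the literals with "u" and typing the parameters as xfs_buf_flags_t keeps all flag arithmetic unsigned. A standalone illustration of the failure mode being avoided:

.. code-block:: c

    #include <stdio.h>

    int main(void)
    {
        /* (int)(1u << 31) is the INT_MIN that "1 << 31" typically
         * produces; widening it sign-extends, smearing bit 31 across
         * the high half, while the unsigned form zero-extends. */
        int bit31_as_int = (int)(1u << 31);
        unsigned long long widened = (unsigned long long)bit31_as_int;
        unsigned long long correct = 1u << 31;

        printf("widened=%#llx correct=%#llx\n", widened, correct);
        return 0;
    }
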
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 9de6205fe134..39ae53efb3ab 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2594,14 +2594,13 @@ xfs_ifree_cluster(
}
/*
- * This is called to return an inode to the inode free list.
- * The inode should already be truncated to 0 length and have
- * no pages associated with it. This routine also assumes that
- * the inode is already a part of the transaction.
+ * This is called to return an inode to the inode free list. The inode should
+ * already be truncated to 0 length and have no pages associated with it. This
+ * routine also assumes that the inode is already a part of the transaction.
*
- * The on-disk copy of the inode will have been added to the list
- * of unlinked inodes in the AGI. We need to remove the inode from
- * that list atomically with respect to freeing it here.
+ * The on-disk copy of the inode will have been added to the list of unlinked
+ * inodes in the AGI. We need to remove the inode from that list atomically with
+ * respect to freeing it here.
*/
int
xfs_ifree(
@@ -2623,13 +2622,16 @@ xfs_ifree(
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
/*
- * Pull the on-disk inode from the AGI unlinked list.
+ * Free the inode first so that we guarantee that the AGI lock is going
+ * to be taken before we remove the inode from the unlinked list. This
+ * makes the AGI lock -> unlinked list modification order the same as
+ * used in O_TMPFILE creation.
*/
- error = xfs_iunlink_remove(tp, pag, ip);
+ error = xfs_difree(tp, pag, ip->i_ino, &xic);
if (error)
- goto out;
+ return error;
- error = xfs_difree(tp, pag, ip->i_ino, &xic);
+ error = xfs_iunlink_remove(tp, pag, ip);
if (error)
goto out;
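
The xfs_ifree() reorder above is a lock-ordering fix: calling xfs_difree() first guarantees the AGI lock is taken before the unlinked list is modified, the same order O_TMPFILE creation uses. A generic pthread sketch (hypothetical locks, not the XFS code) of why all paths must agree on one order:

.. code-block:: c

    #include <pthread.h>

    static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* cf. AGI lock */
    static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* cf. list update */

    /* Both paths take lock_a before lock_b. If one path reversed the
     * order, two threads could each hold one lock while waiting forever
     * for the other: the classic ABBA deadlock. */
    static void path_one(void)
    {
        pthread_mutex_lock(&lock_a);
        pthread_mutex_lock(&lock_b);
        /* ... modify shared state ... */
        pthread_mutex_unlock(&lock_b);
        pthread_mutex_unlock(&lock_a);
    }

    static void path_two(void)
    {
        pthread_mutex_lock(&lock_a); /* same order as path_one: safe */
        pthread_mutex_lock(&lock_b);
        /* ... */
        pthread_mutex_unlock(&lock_b);
        pthread_mutex_unlock(&lock_a);
    }

    int main(void)
    {
        path_one();
        path_two();
        return 0;
    }
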
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index de177842b951..0c82673238f4 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -175,7 +175,7 @@ xfs_trans_get_buf(
struct xfs_buftarg *target,
xfs_daddr_t blkno,
int numblks,
- uint flags,
+ xfs_buf_flags_t flags,
struct xfs_buf **bpp)
{
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 3614c7834007..e20e7c841489 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -35,6 +35,17 @@ static inline int zonefs_zone_mgmt(struct inode *inode,
lockdep_assert_held(&zi->i_truncate_mutex);
+ /*
+ * With ZNS drives, closing an explicitly open zone that has not been
+ * written will change the zone state to "closed", that is, the zone
+ * will remain active. Since this can then cause failure of explicit
+ * open operations on other zones if the drive's active zone resources
+ * are exceeded, make sure that the zone does not remain active by
+ * resetting it.
+ */
+ if (op == REQ_OP_ZONE_CLOSE && !zi->i_wpoffset)
+ op = REQ_OP_ZONE_RESET;
+
trace_zonefs_zone_mgmt(inode, op);
ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector,
zi->i_zone_size >> SECTOR_SHIFT, GFP_NOFS);
@@ -1142,6 +1153,7 @@ static struct inode *zonefs_alloc_inode(struct super_block *sb)
inode_init_once(&zi->i_vnode);
mutex_init(&zi->i_truncate_mutex);
zi->i_wr_refcnt = 0;
+ zi->i_flags = 0;
return &zi->i_vnode;
}
@@ -1293,12 +1305,13 @@ static void zonefs_init_dir_inode(struct inode *parent, struct inode *inode,
inc_nlink(parent);
}
-static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
- enum zonefs_ztype type)
+static int zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
+ enum zonefs_ztype type)
{
struct super_block *sb = inode->i_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ int ret = 0;
inode->i_ino = zone->start >> sbi->s_zone_sectors_shift;
inode->i_mode = S_IFREG | sbi->s_perm;
@@ -1323,6 +1336,22 @@ static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
sb->s_maxbytes = max(zi->i_max_size, sb->s_maxbytes);
sbi->s_blocks += zi->i_max_size >> sb->s_blocksize_bits;
sbi->s_used_blocks += zi->i_wpoffset >> sb->s_blocksize_bits;
+
+ /*
+ * For sequential zones, make sure that any open zone is closed first
+ * to ensure that the initial number of open zones is 0, in sync with
+ * the open zone accounting done when the mount option
+ * ZONEFS_MNTOPT_EXPLICIT_OPEN is used.
+ */
+ if (type == ZONEFS_ZTYPE_SEQ &&
+ (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
+ zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
+ mutex_lock(&zi->i_truncate_mutex);
+ ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
+ mutex_unlock(&zi->i_truncate_mutex);
+ }
+
+ return ret;
}
static struct dentry *zonefs_create_inode(struct dentry *parent,
@@ -1332,6 +1361,7 @@ static struct dentry *zonefs_create_inode(struct dentry *parent,
struct inode *dir = d_inode(parent);
struct dentry *dentry;
struct inode *inode;
+ int ret;
dentry = d_alloc_name(parent, name);
if (!dentry)
@@ -1342,10 +1372,16 @@ static struct dentry *zonefs_create_inode(struct dentry *parent,
goto dput;
inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime;
- if (zone)
- zonefs_init_file_inode(inode, zone, type);
- else
+ if (zone) {
+ ret = zonefs_init_file_inode(inode, zone, type);
+ if (ret) {
+ iput(inode);
+ goto dput;
+ }
+ } else {
zonefs_init_dir_inode(dir, inode, type);
+ }
+
d_add(dentry, inode);
dir->i_size++;
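
Two behavioural changes land in zonefs above: a never-written zone is reset instead of merely closed, so it stops counting against the drive's active-zone budget, and zonefs_init_file_inode() now returns an error that zonefs_create_inode() must honour by dropping the inode. The close-to-reset decision reduces to a small pure function (a restatement of the hunk with hypothetical stand-in types):

.. code-block:: c

    #include <assert.h>

    enum zone_op { ZONE_OP_CLOSE, ZONE_OP_RESET }; /* hypothetical */

    /* Closing a zone whose write pointer is still at the zone start
     * would leave it active on ZNS drives; reset it instead. */
    static enum zone_op zonefs_effective_op(enum zone_op op,
                                            unsigned long long wpoffset)
    {
        if (op == ZONE_OP_CLOSE && wpoffset == 0)
            return ZONE_OP_RESET;
        return op;
    }

    int main(void)
    {
        assert(zonefs_effective_op(ZONE_OP_CLOSE, 0) == ZONE_OP_RESET);
        assert(zonefs_effective_op(ZONE_OP_CLOSE, 4096) == ZONE_OP_CLOSE);
        return 0;
    }
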
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index edb0e2a602a8..ba1f860af38b 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -21,6 +21,12 @@
#include <linux/panic.h>
#include <linux/printk.h>
+struct warn_args;
+struct pt_regs;
+
+void __warn(const char *file, int line, void *caller, unsigned taint,
+ struct pt_regs *regs, struct warn_args *args);
+
#ifdef CONFIG_BUG
#ifdef CONFIG_GENERIC_BUG
@@ -110,11 +116,6 @@ extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
#endif
/* used internally by panic.c */
-struct warn_args;
-struct pt_regs;
-
-void __warn(const char *file, int line, void *caller, unsigned taint,
- struct pt_regs *regs, struct warn_args *args);
#ifndef WARN_ON
#define WARN_ON(condition) ({ \
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 8fc637379899..df30f11b4a46 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -143,7 +143,7 @@ static inline void put_unaligned_be48(const u64 val, void *p)
static inline u64 __get_unaligned_be48(const u8 *p)
{
- return (u64)p[0] << 40 | (u64)p[1] << 32 | p[2] << 24 |
+ return (u64)p[0] << 40 | (u64)p[1] << 32 | (u64)p[2] << 24 |
p[3] << 16 | p[4] << 8 | p[5];
}
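
The one-character fix above matters because p[2] is promoted to a signed 32-bit int before the shift: for bytes >= 0x80, p[2] << 24 lands in the sign bit and the negative intermediate sign-extends when OR-ed into the u64 result, corrupting bits 32-47. A standalone reproduction of the corrected helper:

.. code-block:: c

    #include <stdint.h>
    #include <stdio.h>

    /* Fixed version, mirroring the hunk above. */
    static uint64_t get_unaligned_be48(const uint8_t *p)
    {
        return (uint64_t)p[0] << 40 | (uint64_t)p[1] << 32 |
               (uint64_t)p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5];
    }

    int main(void)
    {
        const uint8_t buf[6] = { 0x00, 0x00, 0x80, 0x00, 0x00, 0x00 };

        /* Without the (u64) cast on p[2], this would print
         * 0xffffffff80000000 instead of 0x80000000. */
        printf("%#llx\n", (unsigned long long)get_unaligned_be48(buf));
        return 0;
    }
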
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 3c937f8bdb42..9e3aff7e68bb 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -354,6 +354,7 @@
# define DP_PSR_IS_SUPPORTED 1
# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */
# define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED 3 /* eDP 1.4a */
+# define DP_PSR2_WITH_Y_COORD_ET_SUPPORTED 4 /* eDP 1.5, adopted eDP 1.4b SCR */
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
@@ -368,6 +369,7 @@
# define DP_PSR_SETUP_TIME_SHIFT 1
# define DP_PSR2_SU_Y_COORDINATE_REQUIRED (1 << 4) /* eDP 1.4a */
# define DP_PSR2_SU_GRANULARITY_REQUIRED (1 << 5) /* eDP 1.4b */
+# define DP_PSR2_SU_AUX_FRAME_SYNC_NOT_NEEDED (1 << 6)/* eDP 1.5, adopted eDP 1.4b SCR */
#define DP_PSR2_SU_X_GRANULARITY 0x072 /* eDP 1.4b */
#define DP_PSR2_SU_Y_GRANULARITY 0x074 /* eDP 1.4b */
diff --git a/include/dt-bindings/clock/microchip,mpfs-clock.h b/include/dt-bindings/clock/microchip,mpfs-clock.h
index 73f2a9324857..4048669bf756 100644
--- a/include/dt-bindings/clock/microchip,mpfs-clock.h
+++ b/include/dt-bindings/clock/microchip,mpfs-clock.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/*
* Daire McNamara,<daire.mcnamara@microchip.com>
- * Copyright (C) 2020 Microchip Technology Inc. All rights reserved.
+ * Copyright (C) 2020-2022 Microchip Technology Inc. All rights reserved.
*/
#ifndef _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_
@@ -42,4 +42,7 @@
#define CLK_ATHENA 31
#define CLK_CFM 32
+#define CLK_RTCREF 33
+#define CLK_MSSPLL 34
+
#endif /* _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_ */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 9cf51e41e697..54dc2f9a2d56 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -167,7 +167,7 @@ static inline int suspend_disable_secondary_cpus(void) { return 0; }
static inline void suspend_enable_secondary_cpus(void) { }
#endif /* !CONFIG_PM_SLEEP_SMP */
-void cpu_startup_entry(enum cpuhp_state state);
+void __noreturn cpu_startup_entry(enum cpuhp_state state);
void cpu_idle_poll_ctrl(bool enable);
diff --git a/include/linux/dma-buf-map.h b/include/linux/dma-buf-map.h
deleted file mode 100644
index 19fa0b5ae5ec..000000000000
--- a/include/linux/dma-buf-map.h
+++ /dev/null
@@ -1,266 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Pointer to dma-buf-mapped memory, plus helpers.
- */
-
-#ifndef __DMA_BUF_MAP_H__
-#define __DMA_BUF_MAP_H__
-
-#include <linux/io.h>
-#include <linux/string.h>
-
-/**
- * DOC: overview
- *
- * Calling dma-buf's vmap operation returns a pointer to the buffer's memory.
- * Depending on the location of the buffer, users may have to access it with
- * I/O operations or memory load/store operations. For example, copying to
- * system memory could be done with memcpy(), copying to I/O memory would be
- * done with memcpy_toio().
- *
- * .. code-block:: c
- *
- * void *vaddr = ...; // pointer to system memory
- * memcpy(vaddr, src, len);
- *
- * void *vaddr_iomem = ...; // pointer to I/O memory
- * memcpy_toio(vaddr, _iomem, src, len);
- *
- * When using dma-buf's vmap operation, the returned pointer is encoded as
- * :c:type:`struct dma_buf_map <dma_buf_map>`.
- * :c:type:`struct dma_buf_map <dma_buf_map>` stores the buffer's address in
- * system or I/O memory and a flag that signals the required method of
- * accessing the buffer. Use the returned instance and the helper functions
- * to access the buffer's memory in the correct way.
- *
- * The type :c:type:`struct dma_buf_map <dma_buf_map>` and its helpers are
- * actually independent from the dma-buf infrastructure. When sharing buffers
- * among devices, drivers have to know the location of the memory to access
- * the buffers in a safe way. :c:type:`struct dma_buf_map <dma_buf_map>`
- * solves this problem for dma-buf and its users. If other drivers or
- * sub-systems require similar functionality, the type could be generalized
- * and moved to a more prominent header file.
- *
- * Open-coding access to :c:type:`struct dma_buf_map <dma_buf_map>` is
- * considered bad style. Rather then accessing its fields directly, use one
- * of the provided helper functions, or implement your own. For example,
- * instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be initialized
- * statically with DMA_BUF_MAP_INIT_VADDR(), or at runtime with
- * dma_buf_map_set_vaddr(). These helpers will set an address in system memory.
- *
- * .. code-block:: c
- *
- * struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(0xdeadbeaf);
- *
- * dma_buf_map_set_vaddr(&map, 0xdeadbeaf);
- *
- * To set an address in I/O memory, use dma_buf_map_set_vaddr_iomem().
- *
- * .. code-block:: c
- *
- * dma_buf_map_set_vaddr_iomem(&map, 0xdeadbeaf);
- *
- * Instances of struct dma_buf_map do not have to be cleaned up, but
- * can be cleared to NULL with dma_buf_map_clear(). Cleared mappings
- * always refer to system memory.
- *
- * .. code-block:: c
- *
- * dma_buf_map_clear(&map);
- *
- * Test if a mapping is valid with either dma_buf_map_is_set() or
- * dma_buf_map_is_null().
- *
- * .. code-block:: c
- *
- * if (dma_buf_map_is_set(&map) != dma_buf_map_is_null(&map))
- * // always true
- *
- * Instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be compared
- * for equality with dma_buf_map_is_equal(). Mappings the point to different
- * memory spaces, system or I/O, are never equal. That's even true if both
- * spaces are located in the same address space, both mappings contain the
- * same address value, or both mappings refer to NULL.
- *
- * .. code-block:: c
- *
- * struct dma_buf_map sys_map; // refers to system memory
- * struct dma_buf_map io_map; // refers to I/O memory
- *
- * if (dma_buf_map_is_equal(&sys_map, &io_map))
- * // always false
- *
- * A set up instance of struct dma_buf_map can be used to access or manipulate
- * the buffer memory. Depending on the location of the memory, the provided
- * helpers will pick the correct operations. Data can be copied into the memory
- * with dma_buf_map_memcpy_to(). The address can be manipulated with
- * dma_buf_map_incr().
- *
- * .. code-block:: c
- *
- * const void *src = ...; // source buffer
- * size_t len = ...; // length of src
- *
- * dma_buf_map_memcpy_to(&map, src, len);
- * dma_buf_map_incr(&map, len); // go to first byte after the memcpy
- */
-
-/**
- * struct dma_buf_map - Pointer to vmap'ed dma-buf memory.
- * @vaddr_iomem: The buffer's address if in I/O memory
- * @vaddr: The buffer's address if in system memory
- * @is_iomem: True if the dma-buf memory is located in I/O
- * memory, or false otherwise.
- */
-struct dma_buf_map {
- union {
- void __iomem *vaddr_iomem;
- void *vaddr;
- };
- bool is_iomem;
-};
-
-/**
- * DMA_BUF_MAP_INIT_VADDR - Initializes struct dma_buf_map to an address in system memory
- * @vaddr_: A system-memory address
- */
-#define DMA_BUF_MAP_INIT_VADDR(vaddr_) \
- { \
- .vaddr = (vaddr_), \
- .is_iomem = false, \
- }
-
-/**
- * dma_buf_map_set_vaddr - Sets a dma-buf mapping structure to an address in system memory
- * @map: The dma-buf mapping structure
- * @vaddr: A system-memory address
- *
- * Sets the address and clears the I/O-memory flag.
- */
-static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr)
-{
- map->vaddr = vaddr;
- map->is_iomem = false;
-}
-
-/**
- * dma_buf_map_set_vaddr_iomem - Sets a dma-buf mapping structure to an address in I/O memory
- * @map: The dma-buf mapping structure
- * @vaddr_iomem: An I/O-memory address
- *
- * Sets the address and the I/O-memory flag.
- */
-static inline void dma_buf_map_set_vaddr_iomem(struct dma_buf_map *map,
- void __iomem *vaddr_iomem)
-{
- map->vaddr_iomem = vaddr_iomem;
- map->is_iomem = true;
-}
-
-/**
- * dma_buf_map_is_equal - Compares two dma-buf mapping structures for equality
- * @lhs: The dma-buf mapping structure
- * @rhs: A dma-buf mapping structure to compare with
- *
- * Two dma-buf mapping structures are equal if they both refer to the same type of memory
- * and to the same address within that memory.
- *
- * Returns:
- * True is both structures are equal, or false otherwise.
- */
-static inline bool dma_buf_map_is_equal(const struct dma_buf_map *lhs,
- const struct dma_buf_map *rhs)
-{
- if (lhs->is_iomem != rhs->is_iomem)
- return false;
- else if (lhs->is_iomem)
- return lhs->vaddr_iomem == rhs->vaddr_iomem;
- else
- return lhs->vaddr == rhs->vaddr;
-}
-
-/**
- * dma_buf_map_is_null - Tests for a dma-buf mapping to be NULL
- * @map: The dma-buf mapping structure
- *
- * Depending on the state of struct dma_buf_map.is_iomem, tests if the
- * mapping is NULL.
- *
- * Returns:
- * True if the mapping is NULL, or false otherwise.
- */
-static inline bool dma_buf_map_is_null(const struct dma_buf_map *map)
-{
- if (map->is_iomem)
- return !map->vaddr_iomem;
- return !map->vaddr;
-}
-
-/**
- * dma_buf_map_is_set - Tests is the dma-buf mapping has been set
- * @map: The dma-buf mapping structure
- *
- * Depending on the state of struct dma_buf_map.is_iomem, tests if the
- * mapping has been set.
- *
- * Returns:
- * True if the mapping is been set, or false otherwise.
- */
-static inline bool dma_buf_map_is_set(const struct dma_buf_map *map)
-{
- return !dma_buf_map_is_null(map);
-}
-
-/**
- * dma_buf_map_clear - Clears a dma-buf mapping structure
- * @map: The dma-buf mapping structure
- *
- * Clears all fields to zero; including struct dma_buf_map.is_iomem. So
- * mapping structures that were set to point to I/O memory are reset for
- * system memory. Pointers are cleared to NULL. This is the default.
- */
-static inline void dma_buf_map_clear(struct dma_buf_map *map)
-{
- if (map->is_iomem) {
- map->vaddr_iomem = NULL;
- map->is_iomem = false;
- } else {
- map->vaddr = NULL;
- }
-}
-
-/**
- * dma_buf_map_memcpy_to - Memcpy into dma-buf mapping
- * @dst: The dma-buf mapping structure
- * @src: The source buffer
- * @len: The number of byte in src
- *
- * Copies data into a dma-buf mapping. The source buffer is in system
- * memory. Depending on the buffer's location, the helper picks the correct
- * method of accessing the memory.
- */
-static inline void dma_buf_map_memcpy_to(struct dma_buf_map *dst, const void *src, size_t len)
-{
- if (dst->is_iomem)
- memcpy_toio(dst->vaddr_iomem, src, len);
- else
- memcpy(dst->vaddr, src, len);
-}
-
-/**
- * dma_buf_map_incr - Increments the address stored in a dma-buf mapping
- * @map: The dma-buf mapping structure
- * @incr: The number of bytes to increment
- *
- * Increments the address stored in a dma-buf mapping. Depending on the
- * buffer's location, the correct value will be updated.
- */
-static inline void dma_buf_map_incr(struct dma_buf_map *map, size_t incr)
-{
- if (map->is_iomem)
- map->vaddr_iomem += incr;
- else
- map->vaddr += incr;
-}
-
-#endif /* __DMA_BUF_MAP_H__ */
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 6727fb0db619..e25539072463 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -573,7 +573,6 @@ int fscache_write(struct netfs_cache_resources *cres,
/**
* fscache_clear_page_bits - Clear the PG_fscache bits from a set of pages
- * @cookie: The cookie representing the cache object
* @mapping: The netfs inode to use as the source
* @start: The start position in @mapping
* @len: The amount of data to unlock
@@ -582,8 +581,7 @@ int fscache_write(struct netfs_cache_resources *cres,
* Clear the PG_fscache flag from a sequence of pages and wake up anyone who's
* waiting.
*/
-static inline void fscache_clear_page_bits(struct fscache_cookie *cookie,
- struct address_space *mapping,
+static inline void fscache_clear_page_bits(struct address_space *mapping,
loff_t start, size_t len,
bool caching)
{
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index c3aa8b330e1c..e71f6e1bfafe 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -688,7 +688,7 @@ void acpi_dev_remove_driver_gpios(struct acpi_device *adev);
int devm_acpi_dev_add_driver_gpios(struct device *dev,
const struct acpi_gpio_mapping *gpios);
-struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label);
+struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label);
#else /* CONFIG_GPIOLIB && CONFIG_ACPI */
@@ -705,6 +705,12 @@ static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
return -ENXIO;
}
+static inline struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin,
+ char *label)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
#endif /* CONFIG_GPIOLIB && CONFIG_ACPI */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 53c1b6082a4c..ac2a1d758a80 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -169,6 +169,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
+int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
@@ -378,6 +379,11 @@ static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
return 0;
}
+static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
+{
+ return 0;
+}
+
static inline void putback_active_hugepage(struct page *page)
{
}
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 08ba5995aa8b..fe6efb24d151 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -64,15 +64,6 @@
)
/**
- * lower_48_bits() - return bits 0-47 of a number
- * @n: the number we're accessing
- */
-static inline u64 lower_48_bits(u64 n)
-{
- return n & ((1ull << 48) - 1);
-}
-
-/**
* upper_32_bits - return bits 32-63 of a number
* @n: the number we're accessing
*
@@ -294,7 +285,7 @@ static inline char *hex_byte_pack_upper(char *buf, u8 byte)
return buf;
}
-extern int hex_to_bin(char ch);
+extern int hex_to_bin(unsigned char ch);
extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
extern char *bin2hex(char *dst, const void *src, size_t count);
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index f49e64222628..726857a4b680 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -204,6 +204,22 @@ static __always_inline __must_check bool kfence_free(void *addr)
*/
bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+/**
+ * __kfence_obj_info() - fill kmem_obj_info struct
+ * @kpp: kmem_obj_info to be filled
+ * @object: the object
+ *
+ * Return:
+ * * false - not a KFENCE object
+ * * true - a KFENCE object, filled @kpp
+ *
+ * Copies information to @kpp for KFENCE objects.
+ */
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+#endif
+
#else /* CONFIG_KFENCE */
static inline bool is_kfence_address(const void *addr) { return false; }
@@ -221,6 +237,14 @@ static inline bool __must_check kfence_handle_page_fault(unsigned long addr, boo
return false;
}
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+ return false;
+}
+#endif
+
#endif
#endif /* _LINUX_KFENCE_H */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3f9b22c4983a..34eed5f85ed6 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -315,7 +315,10 @@ struct kvm_vcpu {
int cpu;
int vcpu_id; /* id given by userspace at creation */
int vcpu_idx; /* index in kvm->vcpus array */
- int srcu_idx;
+ int ____srcu_idx; /* Don't use this directly. You've been warned. */
+#ifdef CONFIG_PROVE_RCU
+ int srcu_depth;
+#endif
int mode;
u64 requests;
unsigned long guest_debug;
@@ -840,6 +843,25 @@ static inline void kvm_vm_bugged(struct kvm *kvm)
unlikely(__ret); \
})
+static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PROVE_RCU
+ WARN_ONCE(vcpu->srcu_depth++,
+ "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
+#endif
+ vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+}
+
+static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
+{
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);
+
+#ifdef CONFIG_PROVE_RCU
+ WARN_ONCE(--vcpu->srcu_depth,
+ "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
+#endif
+}
+
static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
@@ -2197,6 +2219,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
unsigned long start, unsigned long end);
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
+
#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
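
The kvm_vcpu_srcu_read_lock()/unlock() wrappers added above hide the per-vCPU srcu_idx (deliberately renamed ____srcu_idx) and, under CONFIG_PROVE_RCU, use srcu_depth to warn on nested or unbalanced locking. A hypothetical caller sketch of the intended pattern (do_something_with_memslots() is a made-up placeholder):

.. code-block:: c

    /* Illustrative only: bracket memslot access with the new wrappers
     * instead of open-coding srcu_read_lock(&vcpu->kvm->srcu). */
    static int vcpu_access_memslots(struct kvm_vcpu *vcpu)
    {
        int ret;

        kvm_vcpu_srcu_read_lock(vcpu);
        ret = do_something_with_memslots(vcpu); /* hypothetical helper */
        kvm_vcpu_srcu_read_unlock(vcpu);

        return ret;
    }
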
diff --git a/include/linux/mei_aux.h b/include/linux/mei_aux.h
new file mode 100644
index 000000000000..587f25128848
--- /dev/null
+++ b/include/linux/mei_aux.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Intel Corporation. All rights reserved.
+ */
+#ifndef _LINUX_MEI_AUX_H
+#define _LINUX_MEI_AUX_H
+
+#include <linux/auxiliary_bus.h>
+
+struct mei_aux_device {
+ struct auxiliary_device aux_dev;
+ int irq;
+ struct resource bar;
+};
+
+#define auxiliary_dev_to_mei_aux_dev(auxiliary_dev) \
+ container_of(auxiliary_dev, struct mei_aux_device, aux_dev)
+
+#endif /* _LINUX_MEI_AUX_H */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index a68dce3873fc..89b14729d59f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1012,6 +1012,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
}
void mem_cgroup_flush_stats(void);
+void mem_cgroup_flush_stats_delayed(void);
void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val);
@@ -1455,6 +1456,10 @@ static inline void mem_cgroup_flush_stats(void)
{
}
+static inline void mem_cgroup_flush_stats_delayed(void)
+{
+}
+
static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
{
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e34edb775334..9f44254af8ce 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3197,6 +3197,14 @@ extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);
+#ifdef CONFIG_MEMORY_FAILURE
+extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags);
+#else
+static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
+{
+ return 0;
+}
+#endif
#ifndef arch_memory_failure
static inline int arch_memory_failure(unsigned long pfn, int flags)
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 151607e9d64a..955aee14b0f7 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -389,10 +389,8 @@ struct mtd_info {
/* List of partitions attached to this MTD device */
struct list_head partitions;
- union {
- struct mtd_part part;
- struct mtd_master master;
- };
+ struct mtd_part part;
+ struct mtd_master master;
};
static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 59e27a2b7bf0..b1fbe21650bb 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -199,10 +199,10 @@ struct net_device_stats {
* Try to fit them in a single cache line, for dev_get_stats() sake.
*/
struct net_device_core_stats {
- local_t rx_dropped;
- local_t tx_dropped;
- local_t rx_nohandler;
-} __aligned(4 * sizeof(local_t));
+ unsigned long rx_dropped;
+ unsigned long tx_dropped;
+ unsigned long rx_nohandler;
+} __aligned(4 * sizeof(unsigned long));
#include <linux/cache.h>
#include <linux/skbuff.h>
@@ -3843,15 +3843,15 @@ static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
return false;
}
-struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev);
+struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev);
-static inline struct net_device_core_stats *dev_core_stats(struct net_device *dev)
+static inline struct net_device_core_stats __percpu *dev_core_stats(struct net_device *dev)
{
/* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
if (likely(p))
- return this_cpu_ptr(p);
+ return p;
return netdev_core_stats_alloc(dev);
}
@@ -3859,14 +3859,11 @@ static inline struct net_device_core_stats *dev_core_stats(struct net_device *de
#define DEV_CORE_STATS_INC(FIELD) \
static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \
{ \
- struct net_device_core_stats *p; \
+ struct net_device_core_stats __percpu *p; \
\
- preempt_disable(); \
p = dev_core_stats(dev); \
- \
if (p) \
- local_inc(&p->FIELD); \
- preempt_enable(); \
+ this_cpu_inc(p->FIELD); \
}
DEV_CORE_STATS_INC(rx_dropped)
DEV_CORE_STATS_INC(tx_dropped)
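
With the counters stored as plain per-CPU unsigned longs, the increment path no longer needs local_t or an explicit preempt_disable()/preempt_enable() pair: this_cpu_inc() is preemption-safe on its own, and dev_core_stats() now returns the __percpu pointer for it to resolve. Expanded for one field, the macro above becomes approximately:

.. code-block:: c

    /* Approximate expansion of DEV_CORE_STATS_INC(rx_dropped). */
    static inline void dev_core_stats_rx_dropped_inc(struct net_device *dev)
    {
        struct net_device_core_stats __percpu *p = dev_core_stats(dev);

        if (p)
            this_cpu_inc(p->rx_dropped); /* CPU-local, preemption-safe */
    }
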
diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h
index 060e8d203181..1766e1de6956 100644
--- a/include/linux/posix_acl_xattr.h
+++ b/include/linux/posix_acl_xattr.h
@@ -34,15 +34,19 @@ posix_acl_xattr_count(size_t size)
#ifdef CONFIG_FS_POSIX_ACL
void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
+ struct inode *inode,
void *value, size_t size);
void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
+ struct inode *inode,
void *value, size_t size);
#else
static inline void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
+ struct inode *inode,
void *value, size_t size)
{
}
static inline void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
+ struct inode *inode,
void *value, size_t size)
{
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d5e3c00b74e1..a8911b1f35aa 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1443,6 +1443,7 @@ struct task_struct {
int pagefault_disabled;
#ifdef CONFIG_MMU
struct task_struct *oom_reaper_list;
+ struct timer_list oom_reaper_timer;
#endif
#ifdef CONFIG_VMAP_STACK
struct vm_struct *stack_vm_area;
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index a80356e9dc69..1ad1f4bfa025 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -136,6 +136,14 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
#endif /* CONFIG_MEMCG */
#ifdef CONFIG_MMU
+#ifndef arch_get_mmap_end
+#define arch_get_mmap_end(addr) (TASK_SIZE)
+#endif
+
+#ifndef arch_get_mmap_base
+#define arch_get_mmap_base(addr, base) (base)
+#endif
+
extern void arch_pick_mmap_layout(struct mm_struct *mm,
struct rlimit *rlim_stack);
extern unsigned long
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index a5dda4987e8b..217711fc9cac 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -395,6 +395,7 @@ struct svc_deferred_req {
size_t addrlen;
struct sockaddr_storage daddr; /* where reply must come from */
size_t daddrlen;
+ void *xprt_ctxt;
struct cache_deferred_req handle;
size_t xprt_hlen;
int argslen;
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index a4b1af581f69..248f4ac95642 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -59,6 +59,15 @@ struct crc64_pi_tuple {
__u8 ref_tag[6];
};
+/**
+ * lower_48_bits() - return bits 0-47 of a number
+ * @n: the number we're accessing
+ */
+static inline u64 lower_48_bits(u64 n)
+{
+ return n & ((1ull << 48) - 1);
+}
+
static inline u64 ext_pi_ref_tag(struct request *rq)
{
unsigned int shift = ilog2(queue_logical_block_size(rq->q));
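
lower_48_bits() lands here, next to its user: the 64-bit CRC PI tuple carries only a 48-bit reference tag, so the LBA-derived value has to be masked down. A standalone check of the mask (plain C rendering of the helper above):

.. code-block:: c

    #include <assert.h>
    #include <stdint.h>

    static inline uint64_t lower_48_bits(uint64_t n)
    {
        return n & ((1ull << 48) - 1);
    }

    int main(void)
    {
        /* Only bits 0-47 survive; the top 16 bits are discarded. */
        assert(lower_48_bits(0xabcd123456789abcULL) == 0x123456789abcULL);
        return 0;
    }
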
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 059b18eb1f1f..5745c90c8800 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -75,7 +75,7 @@
* By default we use get_cycles() for this purpose, but individual
* architectures may override this in their asm/timex.h header file.
*/
-#define random_get_entropy() get_cycles()
+#define random_get_entropy() ((unsigned long)get_cycles())
#endif
/*
diff --git a/include/linux/usb/pd_bdo.h b/include/linux/usb/pd_bdo.h
index 033fe3e17141..7c25b88d79f9 100644
--- a/include/linux/usb/pd_bdo.h
+++ b/include/linux/usb/pd_bdo.h
@@ -15,7 +15,7 @@
#define BDO_MODE_CARRIER2 (5 << 28)
#define BDO_MODE_CARRIER3 (6 << 28)
#define BDO_MODE_EYE (7 << 28)
-#define BDO_MODE_TESTDATA (8 << 28)
+#define BDO_MODE_TESTDATA (8U << 28)
#define BDO_MODE_MASK(mode) ((mode) & 0xf0000000)
diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h
index 74a4a0f17b28..48f2dd3c568c 100644
--- a/include/linux/vfio_pci_core.h
+++ b/include/linux/vfio_pci_core.h
@@ -133,6 +133,8 @@ struct vfio_pci_core_device {
struct mutex ioeventfds_lock;
struct list_head ioeventfds_list;
struct vfio_pci_vf_token *vf_token;
+ struct list_head sriov_pfs_item;
+ struct vfio_pci_core_device *sriov_pf_core_dev;
struct notifier_block nb;
struct mutex vma_lock;
struct list_head vma_list;
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 3b1df7da402d..b159c2789961 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -26,7 +26,7 @@ struct notifier_block; /* in notifier.h */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */
-#define VM_NO_HUGE_VMAP 0x00000400 /* force PAGE_SIZE pte mapping */
+#define VM_ALLOW_HUGE_VMAP 0x00000400 /* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
!defined(CONFIG_KASAN_VMALLOC)
@@ -153,7 +153,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
const void *caller) __alloc_size(1);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
int node, const void *caller) __alloc_size(1);
-void *vmalloc_no_huge(unsigned long size) __alloc_size(1);
+void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h
index 7c93f5177532..9c0ad64b8d29 100644
--- a/include/memory/renesas-rpc-if.h
+++ b/include/memory/renesas-rpc-if.h
@@ -72,6 +72,7 @@ struct rpcif {
enum rpcif_type type;
enum rpcif_data_dir dir;
u8 bus_size;
+ u8 xfer_size;
void *buffer;
u32 xferlen;
u32 smcr;
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 5cb095b09a94..69ef31cea582 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -578,6 +578,7 @@ enum {
#define HCI_ERROR_CONNECTION_TIMEOUT 0x08
#define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d
#define HCI_ERROR_REJ_BAD_ADDR 0x0f
+#define HCI_ERROR_INVALID_PARAMETERS 0x12
#define HCI_ERROR_REMOTE_USER_TERM 0x13
#define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14
#define HCI_ERROR_REMOTE_POWER_OFF 0x15
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index d5377740e99c..8abd08245326 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1156,7 +1156,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
-void hci_le_conn_failed(struct hci_conn *conn, u8 status);
+void hci_conn_failed(struct hci_conn *conn, u8 status);
/*
* hci_conn_get() and hci_conn_put() are used to control the life-time of an
diff --git a/include/net/esp.h b/include/net/esp.h
index 90cd02ff77ef..9c5637d41d95 100644
--- a/include/net/esp.h
+++ b/include/net/esp.h
@@ -4,8 +4,6 @@
#include <linux/skbuff.h>
-#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER)
-
struct ip_esp_hdr;
static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index aa33e1092e2c..9f65f1bfbd24 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -59,6 +59,8 @@ struct flow_dissector_key_vlan {
__be16 vlan_tci;
};
__be16 vlan_tpid;
+ __be16 vlan_eth_type;
+ u16 padding;
};
struct flow_dissector_mpls_lse {
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index a38c4f1e4e5c..74b369bddf49 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -58,7 +58,7 @@ struct ip6_tnl {
/* These fields used only by GRE */
__u32 i_seqno; /* The last seen seqno */
- __u32 o_seqno; /* The last output seqno */
+ atomic_t o_seqno; /* The last output seqno */
int hlen; /* tun_hlen + encap_hlen */
int tun_hlen; /* Precalculated header length */
int encap_hlen; /* Encap header length (FOU,GUE) */
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 0219fe907b26..c24fa934221d 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -116,7 +116,7 @@ struct ip_tunnel {
/* These four fields used only by GRE */
u32 i_seqno; /* The last seen seqno */
- u32 o_seqno; /* The last output seqno */
+ atomic_t o_seqno; /* The last output seqno */
int tun_hlen; /* Precalculated header length */
/* These four fields used only by ERSPAN */
@@ -243,11 +243,18 @@ static inline __be32 tunnel_id_to_key32(__be64 tun_id)
static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
int proto,
__be32 daddr, __be32 saddr,
- __be32 key, __u8 tos, int oif,
+ __be32 key, __u8 tos,
+ struct net *net, int oif,
__u32 mark, __u32 tun_inner_hash)
{
memset(fl4, 0, sizeof(*fl4));
- fl4->flowi4_oif = oif;
+
+ if (oif) {
+ fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index_rcu(net, oif);
+ /* Legacy VRF/l3mdev use case */
+ fl4->flowi4_oif = fl4->flowi4_l3mdev ? 0 : oif;
+ }
+
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->flowi4_tos = tos;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 3d83b64471d3..b4af4837d80b 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -75,8 +75,8 @@ struct netns_ipv6 {
struct list_head fib6_walkers;
rwlock_t fib6_walker_lock;
spinlock_t fib6_gc_lock;
- unsigned int ip6_rt_gc_expire;
- unsigned long ip6_rt_last_gc;
+ atomic_t ip6_rt_gc_expire;
+ unsigned long ip6_rt_last_gc;
unsigned char flowlabel_has_excl;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
bool fib6_has_custom_rules;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 70ca4a5e330a..cc1295037533 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -480,6 +480,7 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
+ const struct tcp_request_sock_ops *af_ops,
struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
@@ -620,6 +621,7 @@ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
+void tcp_check_space(struct sock *sk);
/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
@@ -1042,6 +1044,7 @@ struct rate_sample {
int losses; /* number of packets marked lost upon ACK */
u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
u32 prior_in_flight; /* in flight before this ACK */
+ u32 last_end_seq; /* end_seq of most recently ACKed packet */
bool is_app_limited; /* is sample from packet with bubble in pipe? */
bool is_retrans; /* is sample from retransmission? */
bool is_ack_delayed; /* is this (likely) a delayed ACK? */
@@ -1164,6 +1167,11 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
bool is_sack_reneg, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);
+static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
+{
+ return t1 > t2 || (t1 == t2 && after(seq1, seq2));
+}
+
/* These functions determine how the current flow behaves in respect of SACK
* handling. SACK is negotiated with the peer, and therefore it can vary
* between different flows.
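
tcp_skb_sent_after() gives rate sampling a total order on transmitted packets: the later timestamp wins, and equal timestamps fall back to the wrap-safe sequence comparison, which is what the new last_end_seq field in struct rate_sample exists to feed. A standalone rendering:

.. code-block:: c

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* after() is the usual wrap-safe TCP sequence comparison. */
    static bool after(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq2 - seq1) < 0;
    }

    static bool tcp_skb_sent_after(uint64_t t1, uint64_t t2,
                                   uint32_t seq1, uint32_t seq2)
    {
        return t1 > t2 || (t1 == t2 && after(seq1, seq2));
    }

    int main(void)
    {
        /* Two packets stamped at the same time: higher sequence wins. */
        printf("%d\n", tcp_skb_sent_after(100, 100, 2000, 1000)); /* 1 */
        printf("%d\n", tcp_skb_sent_after(99, 100, 2000, 1000));  /* 0 */
        return 0;
    }
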
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 5554ee75e7da..647722e847b4 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -97,6 +97,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
struct net_device *dev, u16 queue_id);
+int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index e76c94697c1b..d0a24779c52d 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -53,8 +53,10 @@ enum {
#define ISID_SIZE 6
-/* Connection suspend "bit" */
-#define ISCSI_SUSPEND_BIT 1
+/* Connection flags */
+#define ISCSI_CONN_FLAG_SUSPEND_TX BIT(0)
+#define ISCSI_CONN_FLAG_SUSPEND_RX BIT(1)
+#define ISCSI_CONN_FLAG_BOUND BIT(2)
#define ISCSI_ITT_MASK 0x1fff
#define ISCSI_TOTAL_CMDS_MAX 4096
@@ -211,8 +213,7 @@ struct iscsi_conn {
struct list_head cmdqueue; /* data-path cmd queue */
struct list_head requeue; /* tasks needing another run */
struct work_struct xmitwork; /* per-conn. xmit workqueue */
- unsigned long suspend_tx; /* suspend Tx */
- unsigned long suspend_rx; /* suspend Rx */
+ unsigned long flags; /* ISCSI_CONN_FLAGs */
/* negotiated params */
unsigned max_recv_dlength; /* initiator_max_recv_dsl*/
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 38e4a67f5922..9acb8422f680 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -211,6 +211,8 @@ struct iscsi_cls_conn {
struct mutex ep_mutex;
struct iscsi_endpoint *ep;
+ /* Used when accessing flags and queueing work. */
+ spinlock_t lock;
unsigned long flags;
struct work_struct cleanup_work;
@@ -295,7 +297,7 @@ extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
struct iscsi_endpoint {
void *dd_data; /* LLD private data */
struct device dev;
- uint64_t id;
+ int id;
struct iscsi_cls_conn *conn;
};
diff --git a/include/sound/core.h b/include/sound/core.h
index b7e9b58d3c78..6d4cc49584c6 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -284,6 +284,7 @@ int snd_card_disconnect(struct snd_card *card);
void snd_card_disconnect_sync(struct snd_card *card);
int snd_card_free(struct snd_card *card);
int snd_card_free_when_closed(struct snd_card *card);
+int snd_card_free_on_error(struct device *dev, int ret);
void snd_card_set_id(struct snd_card *card, const char *id);
int snd_card_register(struct snd_card *card);
int snd_card_info_init(void);
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 653dfffb3ac8..8d79cebf95f3 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -51,6 +51,11 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
#endif
+/* fallback types, don't use those directly */
+#ifdef CONFIG_SND_DMA_SGBUF
+#define SNDRV_DMA_TYPE_DEV_SG_FALLBACK 10
+#define SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK 11
+#endif
/*
* info for buffer allocation
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index a52080407b98..766dc6f009c0 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -179,6 +179,10 @@ struct snd_soc_component_driver {
struct snd_pcm_hw_params *params);
bool use_dai_pcm_id; /* use DAI link PCM ID as PCM device number */
int be_pcm_base; /* base device ID for all BE PCMs */
+
+#ifdef CONFIG_DEBUG_FS
+ const char *debugfs_prefix;
+#endif
};
struct snd_soc_component {
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 5337acfe1e9c..3995c58a1c51 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -2015,17 +2015,18 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
TP_STRUCT__entry(
__field(const void *, dr)
__field(u32, xid)
- __string(addr, dr->xprt->xpt_remotebuf)
+ __array(__u8, addr, INET6_ADDRSTRLEN + 10)
),
TP_fast_assign(
__entry->dr = dr;
__entry->xid = be32_to_cpu(*(__be32 *)(dr->args +
(dr->xprt_hlen>>2)));
- __assign_str(addr, dr->xprt->xpt_remotebuf);
+ snprintf(__entry->addr, sizeof(__entry->addr) - 1,
+ "%pISpc", (struct sockaddr *)&dr->addr);
),
- TP_printk("addr=%s dr=%p xid=0x%08x", __get_str(addr), __entry->dr,
+ TP_printk("addr=%s dr=%p xid=0x%08x", __entry->addr, __entry->dr,
__entry->xid)
);
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 05c3642aaece..35ca528803fd 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -172,7 +172,9 @@ enum drm_i915_gem_engine_class {
I915_ENGINE_CLASS_INVALID = -1
};
-/*
+/**
+ * struct i915_engine_class_instance - Engine class/instance identifier
+ *
* There may be more than one engine fulfilling any role within the system.
* Each engine of a class is given a unique instance number and therefore
* any engine can be specified by its class:instance tuplet. APIs that allow
@@ -180,10 +182,21 @@ enum drm_i915_gem_engine_class {
* for this identification.
*/
struct i915_engine_class_instance {
- __u16 engine_class; /* see enum drm_i915_gem_engine_class */
- __u16 engine_instance;
+ /**
+ * @engine_class:
+ *
+ * Engine class from enum drm_i915_gem_engine_class
+ */
+ __u16 engine_class;
#define I915_ENGINE_CLASS_INVALID_NONE -1
#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
+
+ /**
+ * @engine_instance:
+ *
+ * Engine instance.
+ */
+ __u16 engine_instance;
};
/**
@@ -2657,24 +2670,65 @@ enum drm_i915_perf_record_type {
DRM_I915_PERF_RECORD_MAX /* non-ABI */
};
-/*
+/**
+ * struct drm_i915_perf_oa_config
+ *
* Structure to upload perf dynamic configuration into the kernel.
*/
struct drm_i915_perf_oa_config {
- /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
+ /**
+ * @uuid:
+ *
+ * String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x"
+ */
char uuid[36];
+ /**
+ * @n_mux_regs:
+ *
+ * Number of mux regs in &mux_regs_ptr.
+ */
__u32 n_mux_regs;
+
+ /**
+ * @n_boolean_regs:
+ *
+ * Number of boolean regs in &boolean_regs_ptr.
+ */
__u32 n_boolean_regs;
+
+ /**
+ * @n_flex_regs:
+ *
+ * Number of flex regs in &flex_regs_ptr.
+ */
__u32 n_flex_regs;
- /*
- * These fields are pointers to tuples of u32 values (register address,
- * value). For example the expected length of the buffer pointed by
- * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+ /**
+ * @mux_regs_ptr:
+ *
+ * Pointer to tuples of u32 values (register address, value) for mux
+ * registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_mux_regs).
*/
__u64 mux_regs_ptr;
+
+ /**
+ * @boolean_regs_ptr:
+ *
+ * Pointer to tuples of u32 values (register address, value) for boolean
+ * registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_boolean_regs).
+ */
__u64 boolean_regs_ptr;
+
+ /**
+ * @flex_regs_ptr:
+ *
+ * Pointer to tuples of u32 values (register address, value) for flex
+ * registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_flex_regs).
+ */
__u64 flex_regs_ptr;
};
@@ -2685,12 +2739,24 @@ struct drm_i915_perf_oa_config {
* @data_ptr is also depends on the specific @query_id.
*/
struct drm_i915_query_item {
- /** @query_id: The id for this query */
+ /**
+ * @query_id:
+ *
+ * The id for this query. Currently accepted query IDs are:
+ * - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info)
+ * - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info)
+ * - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config)
+ * - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
+ * - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
+ * - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
+ */
__u64 query_id;
-#define DRM_I915_QUERY_TOPOLOGY_INFO 1
-#define DRM_I915_QUERY_ENGINE_INFO 2
-#define DRM_I915_QUERY_PERF_CONFIG 3
-#define DRM_I915_QUERY_MEMORY_REGIONS 4
+#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+#define DRM_I915_QUERY_ENGINE_INFO 2
+#define DRM_I915_QUERY_PERF_CONFIG 3
+#define DRM_I915_QUERY_MEMORY_REGIONS 4
+#define DRM_I915_QUERY_HWCONFIG_BLOB 5
+#define DRM_I915_QUERY_GEOMETRY_SUBSLICES 6
/* Must be kept compact -- no holes and well documented */
/**
@@ -2706,14 +2772,17 @@ struct drm_i915_query_item {
/**
* @flags:
*
- * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
+ * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
*
- * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
+ * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the
* following:
*
- * - DRM_I915_QUERY_PERF_CONFIG_LIST
- * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
- * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
+ * - %DRM_I915_QUERY_PERF_CONFIG_LIST
+ * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
+ * - %DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
+ *
+ * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES, must contain
+ * a struct i915_engine_class_instance that references a render engine.
*/
__u32 flags;
#define DRM_I915_QUERY_PERF_CONFIG_LIST 1
@@ -2771,66 +2840,112 @@ struct drm_i915_query {
__u64 items_ptr;
};
-/*
- * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
- *
- * data: contains the 3 pieces of information :
- *
- * - the slice mask with one bit per slice telling whether a slice is
- * available. The availability of slice X can be queried with the following
- * formula :
- *
- * (data[X / 8] >> (X % 8)) & 1
- *
- * - the subslice mask for each slice with one bit per subslice telling
- * whether a subslice is available. Gen12 has dual-subslices, which are
- * similar to two gen11 subslices. For gen12, this array represents dual-
- * subslices. The availability of subslice Y in slice X can be queried
- * with the following formula :
- *
- * (data[subslice_offset +
- * X * subslice_stride +
- * Y / 8] >> (Y % 8)) & 1
- *
- * - the EU mask for each subslice in each slice with one bit per EU telling
- * whether an EU is available. The availability of EU Z in subslice Y in
- * slice X can be queried with the following formula :
+/**
+ * struct drm_i915_query_topology_info
*
- * (data[eu_offset +
- * (X * max_subslices + Y) * eu_stride +
- * Z / 8] >> (Z % 8)) & 1
+ * Describes slice/subslice/EU information queried by
+ * %DRM_I915_QUERY_TOPOLOGY_INFO
*/
struct drm_i915_query_topology_info {
- /*
+ /**
+ * @flags:
+ *
* Unused for now. Must be cleared to zero.
*/
__u16 flags;
+ /**
+ * @max_slices:
+ *
+ * The number of bits used to express the slice mask.
+ */
__u16 max_slices;
+
+ /**
+ * @max_subslices:
+ *
+ * The number of bits used to express the subslice mask.
+ */
__u16 max_subslices;
+
+ /**
+ * @max_eus_per_subslice:
+ *
+ * The number of bits in the EU mask that correspond to a single
+ * subslice's EUs.
+ */
__u16 max_eus_per_subslice;
- /*
+ /**
+ * @subslice_offset:
+ *
* Offset in data[] at which the subslice masks are stored.
*/
__u16 subslice_offset;
- /*
+ /**
+ * @subslice_stride:
+ *
* Stride at which each of the subslice masks for each slice are
* stored.
*/
__u16 subslice_stride;
- /*
+ /**
+ * @eu_offset:
+ *
* Offset in data[] at which the EU masks are stored.
*/
__u16 eu_offset;
- /*
+ /**
+ * @eu_stride:
+ *
* Stride at which each of the EU masks for each subslice are stored.
*/
__u16 eu_stride;
+ /**
+ * @data:
+ *
+ * Contains 3 pieces of information :
+ *
+ * - The slice mask with one bit per slice telling whether a slice is
+ * available. The availability of slice X can be queried with the
+ * following formula :
+ *
+ * .. code:: c
+ *
+ * (data[X / 8] >> (X % 8)) & 1
+ *
+ * Starting with Xe_HP platforms, Intel hardware no longer has
+ * traditional slices so i915 will always report a single slice
+ * (hardcoded slicemask = 0x1) which contains all of the platform's
+ * subslices. I.e., the mask here does not reflect any of the newer
+ * hardware concepts such as "gslices" or "cslices" since userspace
+ * is capable of inferring those from the subslice mask.
+ *
+ * - The subslice mask for each slice with one bit per subslice telling
+ * whether a subslice is available. Starting with Gen12 we use the
+ * term "subslice" to refer to what the hardware documentation
+ * describes as a "dual-subslice". The availability of subslice Y
+ * in slice X can be queried with the following formula :
+ *
+ * .. code:: c
+ *
+ * (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1
+ *
+ * - The EU mask for each subslice in each slice, with one bit per EU
+ * telling whether an EU is available. The availability of EU Z in
+ * subslice Y in slice X can be queried with the following formula :
+ *
+ * .. code:: c
+ *
+ * (data[eu_offset +
+ * (X * max_subslices + Y) * eu_stride +
+ * Z / 8
+ * ] >> (Z % 8)) & 1
+ */
__u8 data[];
};
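
The three formulas documented in @data translate directly into helpers. The sketch below assumes a filled struct drm_i915_query_topology_info (uapi header, include path assumed) and simply transcribes the documented bit tests:

.. code-block:: c

    #include <stdbool.h>
    #include <drm/i915_drm.h> /* uapi header; path as shipped by libdrm */

    /* Direct transcription of the formulas documented in @data. */
    static bool slice_available(const struct drm_i915_query_topology_info *info,
                                int x)
    {
        return (info->data[x / 8] >> (x % 8)) & 1;
    }

    static bool subslice_available(const struct drm_i915_query_topology_info *info,
                                   int x, int y)
    {
        return (info->data[info->subslice_offset + x * info->subslice_stride +
                           y / 8] >> (y % 8)) & 1;
    }

    static bool eu_available(const struct drm_i915_query_topology_info *info,
                             int x, int y, int z)
    {
        return (info->data[info->eu_offset +
                           (x * info->max_subslices + y) * info->eu_stride +
                           z / 8] >> (z % 8)) & 1;
    }
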
@@ -2951,52 +3066,68 @@ struct drm_i915_query_engine_info {
struct drm_i915_engine_info engines[];
};
-/*
- * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
+/**
+ * struct drm_i915_query_perf_config
+ *
+ * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG and
+ * %DRM_I915_QUERY_GEOMETRY_SUBSLICES.
*/
struct drm_i915_query_perf_config {
union {
- /*
- * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
- * this fields to the number of configurations available.
+ /**
+ * @n_configs:
+ *
+ * When &drm_i915_query_item.flags ==
+		 * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this field to
+ * the number of configurations available.
*/
__u64 n_configs;
- /*
- * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
- * i915 will use the value in this field as configuration
- * identifier to decide what data to write into config_ptr.
+ /**
+ * @config:
+ *
+ * When &drm_i915_query_item.flags ==
+ * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the
+ * value in this field as configuration identifier to decide
+ * what data to write into config_ptr.
*/
__u64 config;
- /*
- * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
- * i915 will use the value in this field as configuration
- * identifier to decide what data to write into config_ptr.
+ /**
+ * @uuid:
+ *
+ * When &drm_i915_query_item.flags ==
+ * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the
+ * value in this field as configuration identifier to decide
+ * what data to write into config_ptr.
*
* String formatted like "%08x-%04x-%04x-%04x-%012x"
*/
char uuid[36];
};
- /*
+ /**
+ * @flags:
+ *
* Unused for now. Must be cleared to zero.
*/
__u32 flags;
- /*
- * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
- * write an array of __u64 of configuration identifiers.
+ /**
+ * @data:
*
- * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA, i915 will
- * write a struct drm_i915_perf_oa_config. If the following fields of
- * drm_i915_perf_oa_config are set not set to 0, i915 will write into
- * the associated pointers the values of submitted when the
+ * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST,
+ * i915 will write an array of __u64 of configuration identifiers.
+ *
+ * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_DATA,
+ * i915 will write a struct drm_i915_perf_oa_config. If the following
+ * fields of struct drm_i915_perf_oa_config are not set to 0, i915 will
+	 * write into the associated pointers the values submitted when the
	 * configuration was created:
*
- * - n_mux_regs
- * - n_boolean_regs
- * - n_flex_regs
+ * - &drm_i915_perf_oa_config.n_mux_regs
+ * - &drm_i915_perf_oa_config.n_boolean_regs
+ * - &drm_i915_perf_oa_config.n_flex_regs
*/
__u8 data[];
};
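
As a consumer-side illustration (a hedged sketch, not from this patch: it
assumes an open DRM fd, the usual two-call size/fill pattern of
DRM_IOCTL_I915_QUERY, and elides includes and cleanup), listing the
available configurations could look like:

.. code:: c

	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_PERF_CONFIG,
		.flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
	};
	struct drm_i915_query q = {
		.num_items = 1,
		.items_ptr = (__u64)(uintptr_t)&item,
	};
	struct drm_i915_query_perf_config *cfg;
	__u64 *ids, i;

	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q))	/* 1st call: get size */
		return -errno;
	cfg = calloc(1, item.length);
	item.data_ptr = (__u64)(uintptr_t)cfg;
	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q))	/* 2nd call: get data */
		return -errno;
	ids = (__u64 *)cfg->data;	/* n_configs identifiers follow */
	for (i = 0; i < cfg->n_configs; i++)
		printf("OA config id %llu\n", (unsigned long long)ids[i]);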
@@ -3135,6 +3266,16 @@ struct drm_i915_query_memory_regions {
};
/**
+ * DOC: GuC HWCONFIG blob uAPI
+ *
+ * The GuC produces a blob with information about the current device.
+ * i915 reads this blob from GuC and makes it available via this uAPI.
+ *
+ * The format and meaning of the blob content are documented in the
+ * Programmer's Reference Manual.
+ */
+
+/**
* struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
* extension support using struct i915_user_extension.
*
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 787c657bfae8..7ce993e6786c 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -42,7 +42,7 @@ typedef __s64 Elf64_Sxword;
/* ARM MTE memory tag segment type */
-#define PT_ARM_MEMTAG_MTE (PT_LOPROC + 0x1)
+#define PT_AARCH64_MEMTAG_MTE (PT_LOPROC + 0x2)
/*
* Extended Numbering
diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
index 4c14e8be7267..3a49913d006c 100644
--- a/include/uapi/linux/fb.h
+++ b/include/uapi/linux/fb.h
@@ -182,7 +182,7 @@ struct fb_fix_screeninfo {
*
* For pseudocolor: offset and length should be the same for all color
* components. Offset specifies the position of the least significant bit
- * of the pallette index in a pixel value. Length indicates the number
+ * of the palette index in a pixel value. Length indicates the number
* of available palette entries (i.e. # of entries = 1 << length).
*/
struct fb_bitfield {
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 7989d9483ea7..dff8e7f17074 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -662,6 +662,27 @@
/* Select an area of screen to be copied */
#define KEY_SELECTIVE_SCREENSHOT 0x27a
+/* Move the focus to the next or previous user-controllable element within a UI container */
+#define KEY_NEXT_ELEMENT 0x27b
+#define KEY_PREVIOUS_ELEMENT 0x27c
+
+/* Toggle Autopilot engagement */
+#define KEY_AUTOPILOT_ENGAGE_TOGGLE 0x27d
+
+/* Shortcut Keys */
+#define KEY_MARK_WAYPOINT 0x27e
+#define KEY_SOS 0x27f
+#define KEY_NAV_CHART 0x280
+#define KEY_FISHING_CHART 0x281
+#define KEY_SINGLE_RANGE_RADAR 0x282
+#define KEY_DUAL_RANGE_RADAR 0x283
+#define KEY_RADAR_OVERLAY 0x284
+#define KEY_TRADITIONAL_SONAR 0x285
+#define KEY_CLEARVU_SONAR 0x286
+#define KEY_SIDEVU_SONAR 0x287
+#define KEY_NAV_INFO 0x288
+#define KEY_BRIGHTNESS_MENU 0x289
+
/*
* Some keyboards have keys which do not have a defined meaning, these keys
* are intended to be programmed / bound to macros by the user. For most
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 784adc6f6ed2..1845cf7c80ba 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -296,6 +296,7 @@ struct io_uring_params {
#define IORING_FEAT_NATIVE_WORKERS (1U << 9)
#define IORING_FEAT_RSRC_TAGS (1U << 10)
#define IORING_FEAT_CQE_SKIP (1U << 11)
+#define IORING_FEAT_LINKED_FILE (1U << 12)
/*
* io_uring_register(2) opcodes and arguments
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 91a6fe4e02c0..6a184d260c7f 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -445,7 +445,13 @@ struct kvm_run {
#define KVM_SYSTEM_EVENT_RESET 2
#define KVM_SYSTEM_EVENT_CRASH 3
__u32 type;
- __u64 flags;
+ __u32 ndata;
+ union {
+#ifndef __KERNEL__
+ __u64 flags;
+#endif
+ __u64 data[16];
+ };
} system_event;
/* KVM_EXIT_S390_STSI */
struct {
@@ -1144,6 +1150,8 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_MEM_OP_EXTENSION 211
#define KVM_CAP_PMU_CAPABILITY 212
#define KVM_CAP_DISABLE_QUIRKS2 213
+/* #define KVM_CAP_VM_TSC_CONTROL 214 */
+#define KVM_CAP_SYSTEM_EVENT_DATA 215
#ifdef KVM_CAP_IRQ_ROUTING
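
On the consumer side, a hypothetical VMM exit handler for the extended
layout (assuming KVM_CAP_SYSTEM_EVENT_DATA is advertised and run points at
the vCPU's mmap'ed struct kvm_run):

.. code:: c

	if (run->exit_reason == KVM_EXIT_SYSTEM_EVENT) {
		__u32 i, n = run->system_event.ndata;

		if (n > 16)	/* data[] holds at most 16 entries */
			n = 16;
		for (i = 0; i < n; i++)
			fprintf(stderr, "system_event type %u data[%u] = 0x%llx\n",
				run->system_event.type, i,
				(unsigned long long)run->system_event.data[i]);
	}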
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index 3021ea25a284..7837ba4fe728 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -1,4 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_STDDEF_H
+#define _UAPI_LINUX_STDDEF_H
+
#include <linux/compiler_types.h>
#ifndef __always_inline
@@ -41,3 +44,4 @@
struct { } __empty_ ## NAME; \
TYPE NAME[]; \
}
+#endif
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5797c2a7a93f..d0a9aa0b42e8 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -71,7 +71,6 @@ struct cpuhp_cpu_state {
bool rollback;
bool single;
bool bringup;
- int cpu;
struct hlist_node *node;
struct hlist_node *last;
enum cpuhp_state cb_state;
@@ -475,7 +474,7 @@ static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif
static inline enum cpuhp_state
-cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
+cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
enum cpuhp_state prev_state = st->state;
bool bringup = st->state < target;
@@ -486,14 +485,15 @@ cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
st->target = target;
st->single = false;
st->bringup = bringup;
- if (cpu_dying(st->cpu) != !bringup)
- set_cpu_dying(st->cpu, !bringup);
+ if (cpu_dying(cpu) != !bringup)
+ set_cpu_dying(cpu, !bringup);
return prev_state;
}
static inline void
-cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
+cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
+ enum cpuhp_state prev_state)
{
bool bringup = !st->bringup;
@@ -520,8 +520,8 @@ cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
}
st->bringup = bringup;
- if (cpu_dying(st->cpu) != !bringup)
- set_cpu_dying(st->cpu, !bringup);
+ if (cpu_dying(cpu) != !bringup)
+ set_cpu_dying(cpu, !bringup);
}
/* Regular hotplug invocation of the AP hotplug thread */
@@ -541,15 +541,16 @@ static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
wait_for_ap_thread(st, st->bringup);
}
-static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
+static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
+ enum cpuhp_state target)
{
enum cpuhp_state prev_state;
int ret;
- prev_state = cpuhp_set_state(st, target);
+ prev_state = cpuhp_set_state(cpu, st, target);
__cpuhp_kick_ap(st);
if ((ret = st->result)) {
- cpuhp_reset_state(st, prev_state);
+ cpuhp_reset_state(cpu, st, prev_state);
__cpuhp_kick_ap(st);
}
@@ -581,7 +582,7 @@ static int bringup_wait_for_ap(unsigned int cpu)
if (st->target <= CPUHP_AP_ONLINE_IDLE)
return 0;
- return cpuhp_kick_ap(st, st->target);
+ return cpuhp_kick_ap(cpu, st, st->target);
}
static int bringup_cpu(unsigned int cpu)
@@ -704,7 +705,7 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
ret, cpu, cpuhp_get_step(st->state)->name,
st->state);
- cpuhp_reset_state(st, prev_state);
+ cpuhp_reset_state(cpu, st, prev_state);
if (can_rollback_cpu(st))
WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
prev_state));
@@ -721,7 +722,6 @@ static void cpuhp_create(unsigned int cpu)
init_completion(&st->done_up);
init_completion(&st->done_down);
- st->cpu = cpu;
}
static int cpuhp_should_run(unsigned int cpu)
@@ -875,7 +875,7 @@ static int cpuhp_kick_ap_work(unsigned int cpu)
cpuhp_lock_release(true);
trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
- ret = cpuhp_kick_ap(st, st->target);
+ ret = cpuhp_kick_ap(cpu, st, st->target);
trace_cpuhp_exit(cpu, st->state, prev_state, ret);
return ret;
@@ -1107,7 +1107,7 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
ret, cpu, cpuhp_get_step(st->state)->name,
st->state);
- cpuhp_reset_state(st, prev_state);
+ cpuhp_reset_state(cpu, st, prev_state);
if (st->state < prev_state)
WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
@@ -1134,7 +1134,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
cpuhp_tasks_frozen = tasks_frozen;
- prev_state = cpuhp_set_state(st, target);
+ prev_state = cpuhp_set_state(cpu, st, target);
/*
* If the current CPU state is in the range of the AP hotplug thread,
* then we need to kick the thread.
@@ -1165,7 +1165,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
ret = cpuhp_down_callbacks(cpu, st, target);
if (ret && st->state < prev_state) {
if (st->state == CPUHP_TEARDOWN_CPU) {
- cpuhp_reset_state(st, prev_state);
+ cpuhp_reset_state(cpu, st, prev_state);
__cpuhp_kick_ap(st);
} else {
WARN(1, "DEAD callback error for CPU%d", cpu);
@@ -1352,7 +1352,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
cpuhp_tasks_frozen = tasks_frozen;
- cpuhp_set_state(st, target);
+ cpuhp_set_state(cpu, st, target);
/*
* If the current CPU state is in the range of the AP hotplug thread,
* then we need to kick the thread once more.
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index 4632b0f4f72e..8a6cd53dbe8c 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -114,6 +114,7 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
dma_direct_sync_single_for_cpu(dev, addr, size, dir);
if (unlikely(is_swiotlb_buffer(dev, phys)))
- swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+ swiotlb_tbl_unmap_single(dev, phys, size, dir,
+ attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
#endif /* _KERNEL_DMA_DIRECT_H */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 23bb19716ad3..7858bafffa9d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6247,7 +6247,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
again:
mutex_lock(&event->mmap_mutex);
if (event->rb) {
- if (event->rb->nr_pages != nr_pages) {
+ if (data_page_nr(event->rb) != nr_pages) {
ret = -EINVAL;
goto unlock;
}
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 082832738c8f..5150d5f84c03 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -116,6 +116,11 @@ static inline int page_order(struct perf_buffer *rb)
}
#endif
+static inline int data_page_nr(struct perf_buffer *rb)
+{
+ return rb->nr_pages << page_order(rb);
+}
+
static inline unsigned long perf_data_size(struct perf_buffer *rb)
{
return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 52868716ec35..fb35b926024c 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -859,11 +859,6 @@ void rb_free(struct perf_buffer *rb)
}
#else
-static int data_page_nr(struct perf_buffer *rb)
-{
- return rb->nr_pages << page_order(rb);
-}
-
static struct page *
__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index f7ff8919dc9b..fdf170404650 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -269,8 +269,9 @@ static int __irq_build_affinity_masks(unsigned int startvec,
*/
if (numvecs <= nodes) {
for_each_node_mask(n, nodemsk) {
- cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
- node_to_cpumask[n]);
+ /* Ensure that only CPUs which are in both masks are set */
+ cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+ cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk);
if (++curvec == last_affv)
curvec = firstvec;
}
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index f7df715ec28e..7afa40fe5cc4 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -137,7 +137,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
if (!irq_work_claim(work))
return false;
- kasan_record_aux_stack(work);
+ kasan_record_aux_stack_noalloc(work);
preempt_disable();
if (cpu != smp_processor_id()) {
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 475524bd900a..b3732b210593 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -475,8 +475,11 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
vma->vm_flags |= VM_DONTEXPAND;
for (off = 0; off < size; off += PAGE_SIZE) {
page = vmalloc_to_page(kcov->area + off);
- if (vm_insert_page(vma, vma->vm_start + off, page))
- WARN_ONCE(1, "vm_insert_page() failed");
+ res = vm_insert_page(vma, vma->vm_start + off, page);
+ if (res) {
+ pr_warn_once("kcov: vm_insert_page() failed\n");
+ return res;
+ }
}
return 0;
exit:
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index dbe57df2e199..dd58c0be9ce2 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2126,7 +2126,7 @@ static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
struct kprobe_ctlblk *kcb;
	/* The data must NOT be null. This means the rethook data structure is broken. */
- if (WARN_ON_ONCE(!data))
+ if (WARN_ON_ONCE(!data) || !rp->handler)
return;
__this_cpu_write(current_kprobe, &rp->kp);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d4bd299d67ab..a68482d66535 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3829,11 +3829,11 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
se->avg.runnable_sum = se->avg.runnable_avg * divider;
- se->avg.load_sum = divider;
- if (se_weight(se)) {
- se->avg.load_sum =
- div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
- }
+ se->avg.load_sum = se->avg.load_avg * divider;
+ if (se_weight(se) < se->avg.load_sum)
+ se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
+ else
+ se->avg.load_sum = 1;
enqueue_load_avg(cfs_rq, se);
cfs_rq->avg.util_avg += se->avg.util_avg;
diff --git a/kernel/smp.c b/kernel/smp.c
index 01a7c1706a58..65a630f62363 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -579,7 +579,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
/* There shouldn't be any pending callbacks on an offline CPU. */
if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
- !warned && !llist_empty(head))) {
+ !warned && entry != NULL)) {
warned = true;
WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 2d76c91b85de..d257721c68b8 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -188,7 +188,7 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
*/
if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
#ifdef CONFIG_NO_HZ_FULL
- WARN_ON(tick_nohz_full_running);
+ WARN_ON_ONCE(tick_nohz_full_running);
#endif
tick_do_timer_cpu = cpu;
}
@@ -1538,7 +1538,7 @@ void tick_cancel_sched_timer(int cpu)
}
#endif
-/**
+/*
* Async notification about clocksource changes
*/
void tick_clock_notify(void)
@@ -1559,7 +1559,7 @@ void tick_oneshot_notify(void)
set_bit(0, &ts->check_clocks);
}
-/**
+/*
* Check, if a change happened, which makes oneshot possible.
*
* Called cyclic from the hrtimer softirq (driven by the timer
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 85f1021ad459..9dd2a39cb3b0 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1722,11 +1722,14 @@ static inline void __run_timers(struct timer_base *base)
time_after_eq(jiffies, base->next_expiry)) {
levels = collect_expired_timers(base, heads);
/*
- * The only possible reason for not finding any expired
- * timer at this clk is that all matching timers have been
- * dequeued.
+ * The two possible reasons for not finding any expired
+ * timer at this clk are that all matching timers have been
+ * dequeued or no timer has been queued since
+ * base::next_expiry was set to base::clk +
+ * NEXT_TIMER_MAX_DELTA.
*/
- WARN_ON_ONCE(!levels && !base->next_expiry_recalc);
+ WARN_ON_ONCE(!levels && !base->next_expiry_recalc
+ && base->timers_pending);
base->clk++;
base->next_expiry = __next_timer_interrupt(base);
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 9301578f98e8..06833d404398 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -22,15 +22,33 @@ EXPORT_SYMBOL(hex_asc_upper);
*
* hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
* input.
+ *
+ * This function is used to load cryptographic keys, so it is coded in such a
+ * way that there are no conditions or memory accesses that depend on data.
+ *
+ * Explanation of the logic:
+ * (ch - '9' - 1) is negative if ch <= '9'
+ * ('0' - 1 - ch) is negative if ch >= '0'
+ * we "and" these two values, so the result is negative if ch is in the range
+ * '0' ... '9'
+ * we are only interested in the sign, so we do a shift ">> 8"; note that right
+ * shift of a negative value is implementation-defined, so we cast the
+ * value to (unsigned) before the shift --- we have 0xffffff if ch is in
+ * the range '0' ... '9', 0 otherwise
+ * we "and" this value with (ch - '0' + 1) --- we have a value 1 ... 10 if ch is
+ * in the range '0' ... '9', 0 otherwise
+ * we add this value to -1 --- we have a value 0 ... 9 if ch is in the range '0'
+ * ... '9', -1 otherwise
+ * the next line is similar to the previous one, but we need to decode both
+ * uppercase and lowercase letters, so we use (ch & 0xdf), which converts
+ * lowercase to uppercase
*/
-int hex_to_bin(char ch)
+int hex_to_bin(unsigned char ch)
{
- if ((ch >= '0') && (ch <= '9'))
- return ch - '0';
- ch = tolower(ch);
- if ((ch >= 'a') && (ch <= 'f'))
- return ch - 'a' + 10;
- return -1;
+ unsigned char cu = ch & 0xdf;
+ return -1 +
+ ((ch - '0' + 1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) +
+ ((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8);
}
EXPORT_SYMBOL(hex_to_bin);
@@ -45,10 +63,13 @@ EXPORT_SYMBOL(hex_to_bin);
int hex2bin(u8 *dst, const char *src, size_t count)
{
while (count--) {
- int hi = hex_to_bin(*src++);
- int lo = hex_to_bin(*src++);
+ int hi, lo;
- if ((hi < 0) || (lo < 0))
+ hi = hex_to_bin(*src++);
+ if (unlikely(hi < 0))
+ return -EINVAL;
+ lo = hex_to_bin(*src++);
+ if (unlikely(lo < 0))
return -EINVAL;
*dst++ = (hi << 4) | lo;
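
The branchless expression can be spot-checked in isolation; a standalone
user-space replica of the new hex_to_bin() (illustrative only) passes the
following asserts:

.. code:: c

	#include <assert.h>

	static int hex_to_bin_demo(unsigned char ch)
	{
		unsigned char cu = ch & 0xdf;
		return -1 +
			((ch - '0' + 1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) +
			((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8);
	}

	int main(void)
	{
		assert(hex_to_bin_demo('0') == 0);
		assert(hex_to_bin_demo('9') == 9);
		assert(hex_to_bin_demo('a') == 10);
		assert(hex_to_bin_demo('F') == 15);
		assert(hex_to_bin_demo('g') == -1);	/* invalid digit */
		return 0;
	}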
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 08fc72d3ed16..6432b8c3e431 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -25,7 +25,7 @@
* hit it), 'max' is the address space maximum (and we return
* -EFAULT if we hit it).
*/
-static inline long do_strncpy_from_user(char *dst, const char __user *src,
+static __always_inline long do_strncpy_from_user(char *dst, const char __user *src,
unsigned long count, unsigned long max)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index bffa0ebf9f8b..feeb935a2299 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -20,7 +20,7 @@
* if it fits in a aligned 'long'. The caller needs to check
* the return value against "> max".
*/
-static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
+static __always_inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
unsigned long align, res = 0;
diff --git a/lib/xarray.c b/lib/xarray.c
index 4acc88ea7c21..54e646e8e6ee 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -207,6 +207,8 @@ static void *xas_descend(struct xa_state *xas, struct xa_node *node)
if (xa_is_sibling(entry)) {
offset = xa_to_sibling(entry);
entry = xa_entry(xas->xa, node, offset);
+ if (node->shift && xa_is_node(entry))
+ entry = XA_RETRY_ENTRY;
}
xas->xa_offset = offset;
diff --git a/mm/compaction.c b/mm/compaction.c
index c3e37aa9ff9e..fe915db6149b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -26,6 +26,11 @@
#include "internal.h"
#ifdef CONFIG_COMPACTION
+/*
+ * Fragmentation score check interval for proactive compaction purposes.
+ */
+#define HPAGE_FRAG_CHECK_INTERVAL_MSEC (500)
+
static inline void count_compact_event(enum vm_event_item item)
{
count_vm_event(item);
@@ -51,11 +56,6 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
#define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order)
/*
- * Fragmentation score check interval for proactive compaction purposes.
- */
-static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;
-
-/*
* Page order with-respect-to which proactive compaction
* calculates external fragmentation, which is used as
* the "fragmentation score" of a node/zone.
diff --git a/mm/filemap.c b/mm/filemap.c
index 3a5ffb5587cd..9a1eef6c5d35 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1063,12 +1063,6 @@ void __init pagecache_init(void)
init_waitqueue_head(&folio_wait_table[i]);
page_writeback_init();
-
- /*
- * tmpfs uses the ZERO_PAGE for reading holes: it is up-to-date,
- * and splice's page_cache_pipe_buf_confirm() needs to see that.
- */
- SetPageUptodate(ZERO_PAGE(0));
}
/*
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b34f50156f7e..3fc721789743 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3475,7 +3475,6 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
int nr_nodes, node;
struct page *page;
- int rc = 0;
lockdep_assert_held(&hugetlb_lock);
@@ -3486,15 +3485,19 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
}
for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
- if (!list_empty(&h->hugepage_freelists[node])) {
- page = list_entry(h->hugepage_freelists[node].next,
- struct page, lru);
- rc = demote_free_huge_page(h, page);
- break;
+ list_for_each_entry(page, &h->hugepage_freelists[node], lru) {
+ if (PageHWPoison(page))
+ continue;
+
+ return demote_free_huge_page(h, page);
}
}
- return rc;
+ /*
+ * Only way to get here is if all pages on free lists are poisoned.
+ * Return -EBUSY so that caller will not retry.
+ */
+ return -EBUSY;
}
#define HSTATE_ATTR_RO(_name) \
@@ -6782,6 +6785,16 @@ int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
return ret;
}
+int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
+{
+ int ret;
+
+ spin_lock_irq(&hugetlb_lock);
+ ret = __get_huge_page_for_hwpoison(pfn, flags);
+ spin_unlock_irq(&hugetlb_lock);
+ return ret;
+}
+
void putback_active_hugepage(struct page *page)
{
spin_lock_irq(&hugetlb_lock);
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 07a76c46daa5..9e1b6544bfa8 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -336,8 +336,6 @@ void __kasan_poison_vmalloc(const void *start, unsigned long size)
#endif
-#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
-
void kasan_enable_tagging(void)
{
if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
@@ -347,6 +345,9 @@ void kasan_enable_tagging(void)
else
hw_enable_tagging_sync();
}
+
+#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
+
EXPORT_SYMBOL_GPL(kasan_enable_tagging);
void kasan_force_async_fault(void)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index d79b83d673b1..b01b4bbe0409 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -355,25 +355,27 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
#define hw_set_mem_tag_range(addr, size, tag, init) \
arch_set_mem_tag_range((addr), (size), (tag), (init))
+void kasan_enable_tagging(void);
+
#else /* CONFIG_KASAN_HW_TAGS */
#define hw_enable_tagging_sync()
#define hw_enable_tagging_async()
#define hw_enable_tagging_asymm()
+static inline void kasan_enable_tagging(void) { }
+
#endif /* CONFIG_KASAN_HW_TAGS */
#if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
-void kasan_enable_tagging(void);
void kasan_force_async_fault(void);
-#else /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
+#else /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */
-static inline void kasan_enable_tagging(void) { }
static inline void kasan_force_async_fault(void) { }
-#endif /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
+#endif /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */
#ifdef CONFIG_KASAN_SW_TAGS
u8 kasan_random_tag(void);
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 08291ed33e93..0a9def8ce5e8 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -315,6 +315,13 @@ static void per_cpu_remove_cache(void *arg)
struct qlist_head *q;
q = this_cpu_ptr(&cpu_quarantine);
+ /*
+	 * Ensure ordering between the write to q->offline and
+	 * per_cpu_remove_cache(). Prevent cpu_quarantine from being
+	 * corrupted by an interrupt.
+ */
+ if (READ_ONCE(q->offline))
+ return;
qlist_move_cache(q, &to_free, cache);
qlist_free_all(&to_free, cache);
}
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index a203747ad2c0..9b2b5f56f4ae 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -231,27 +231,6 @@ static bool kfence_unprotect(unsigned long addr)
return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}
-static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
-{
- long index;
-
- /* The checks do not affect performance; only called from slow-paths. */
-
- if (!is_kfence_address((void *)addr))
- return NULL;
-
- /*
- * May be an invalid index if called with an address at the edge of
- * __kfence_pool, in which case we would report an "invalid access"
- * error.
- */
- index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
- if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
- return NULL;
-
- return &kfence_metadata[index];
-}
-
static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
index 9a6c4b1b12a8..600f2e2431d6 100644
--- a/mm/kfence/kfence.h
+++ b/mm/kfence/kfence.h
@@ -96,6 +96,27 @@ struct kfence_metadata {
extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
+static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
+{
+ long index;
+
+ /* The checks do not affect performance; only called from slow-paths. */
+
+ if (!is_kfence_address((void *)addr))
+ return NULL;
+
+ /*
+ * May be an invalid index if called with an address at the edge of
+ * __kfence_pool, in which case we would report an "invalid access"
+ * error.
+ */
+ index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
+ if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
+ return NULL;
+
+ return &kfence_metadata[index];
+}
+
/* KFENCE error types for report generation. */
enum kfence_error_type {
	KFENCE_ERROR_OOB,		/* Detected an out-of-bounds access. */
diff --git a/mm/kfence/report.c b/mm/kfence/report.c
index f93a7b2a338b..f5a6d8ba3e21 100644
--- a/mm/kfence/report.c
+++ b/mm/kfence/report.c
@@ -273,3 +273,50 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r
/* We encountered a memory safety error, taint the kernel! */
add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
}
+
+#ifdef CONFIG_PRINTK
+static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
+{
+ int i, j;
+
+ i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
+ for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
+ kp_stack[j] = (void *)track->stack_entries[i];
+ if (j < KS_ADDRS_COUNT)
+ kp_stack[j] = NULL;
+}
+
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+ struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
+ unsigned long flags;
+
+ if (!meta)
+ return false;
+
+ /*
+ * If state is UNUSED at least show the pointer requested; the rest
+ * would be garbage data.
+ */
+ kpp->kp_ptr = object;
+
+	/* Requesting info on a never-used object is almost certainly a bug. */
+ if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
+ return true;
+
+ raw_spin_lock_irqsave(&meta->lock, flags);
+
+ kpp->kp_slab = slab;
+ kpp->kp_slab_cache = meta->cache;
+ kpp->kp_objp = (void *)meta->addr;
+ kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
+ if (meta->state == KFENCE_OBJECT_FREED)
+ kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
+ /* get_stack_skipnr() ensures the first entry is outside allocator. */
+ kpp->kp_ret = kpp->kp_stack[0];
+
+ raw_spin_unlock_irqrestore(&meta->lock, flags);
+
+ return true;
+}
+#endif
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index acd7cbb82e16..a182f5ddaf68 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1132,7 +1132,7 @@ EXPORT_SYMBOL(kmemleak_no_scan);
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
gfp_t gfp)
{
- if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+ if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);
@@ -1146,7 +1146,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
*/
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
- if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+ if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);
@@ -1158,7 +1158,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
*/
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
- if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+ if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);
@@ -1170,7 +1170,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
*/
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
- if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+ if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 725f76723220..598fece89e2b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -587,6 +587,9 @@ static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
static DEFINE_SPINLOCK(stats_flush_lock);
static DEFINE_PER_CPU(unsigned int, stats_updates);
static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
+static u64 flush_next_time;
+
+#define FLUSH_TIME (2UL*HZ)
/*
* Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
@@ -637,6 +640,7 @@ static void __mem_cgroup_flush_stats(void)
if (!spin_trylock_irqsave(&stats_flush_lock, flag))
return;
+ flush_next_time = jiffies_64 + 2*FLUSH_TIME;
cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
atomic_set(&stats_flush_threshold, 0);
spin_unlock_irqrestore(&stats_flush_lock, flag);
@@ -648,10 +652,16 @@ void mem_cgroup_flush_stats(void)
__mem_cgroup_flush_stats();
}
+void mem_cgroup_flush_stats_delayed(void)
+{
+ if (time_after64(jiffies_64, flush_next_time))
+ mem_cgroup_flush_stats();
+}
+
static void flush_memcg_stats_dwork(struct work_struct *w)
{
__mem_cgroup_flush_stats();
- queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
+ queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
}
/**
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index dcb6bb9cf731..27760c19bad7 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1498,50 +1498,113 @@ static int try_to_split_thp_page(struct page *page, const char *msg)
return 0;
}
-static int memory_failure_hugetlb(unsigned long pfn, int flags)
+/*
+ * Called from hugetlb code with hugetlb_lock held.
+ *
+ * Return values:
+ * 0 - free hugepage
+ * 1 - in-use hugepage
+ * 2 - not a hugepage
+ * -EBUSY - the hugepage is busy (try to retry)
+ * -EHWPOISON - the hugepage is already hwpoisoned
+ */
+int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
+{
+ struct page *page = pfn_to_page(pfn);
+ struct page *head = compound_head(page);
+ int ret = 2; /* fallback to normal page handling */
+ bool count_increased = false;
+
+ if (!PageHeadHuge(head))
+ goto out;
+
+ if (flags & MF_COUNT_INCREASED) {
+ ret = 1;
+ count_increased = true;
+ } else if (HPageFreed(head) || HPageMigratable(head)) {
+ ret = get_page_unless_zero(head);
+ if (ret)
+ count_increased = true;
+ } else {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (TestSetPageHWPoison(head)) {
+ ret = -EHWPOISON;
+ goto out;
+ }
+
+ return ret;
+out:
+ if (count_increased)
+ put_page(head);
+ return ret;
+}
+
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * Taking a refcount on hugetlb pages needs extra care about races
+ * with basic operations like hugepage allocation/free/demotion.
+ * So some of the prechecks for hwpoison (pinning, and testing/setting
+ * PageHWPoison) should be done within a single hugetlb_lock section.
+ */
+static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
{
- struct page *p = pfn_to_page(pfn);
- struct page *head = compound_head(p);
int res;
+ struct page *p = pfn_to_page(pfn);
+ struct page *head;
unsigned long page_flags;
+ bool retry = true;
- if (TestSetPageHWPoison(head)) {
- pr_err("Memory failure: %#lx: already hardware poisoned\n",
- pfn);
- res = -EHWPOISON;
- if (flags & MF_ACTION_REQUIRED)
+ *hugetlb = 1;
+retry:
+ res = get_huge_page_for_hwpoison(pfn, flags);
+ if (res == 2) { /* fallback to normal page handling */
+ *hugetlb = 0;
+ return 0;
+ } else if (res == -EHWPOISON) {
+ pr_err("Memory failure: %#lx: already hardware poisoned\n", pfn);
+ if (flags & MF_ACTION_REQUIRED) {
+ head = compound_head(p);
res = kill_accessing_process(current, page_to_pfn(head), flags);
+ }
+ return res;
+ } else if (res == -EBUSY) {
+ if (retry) {
+ retry = false;
+ goto retry;
+ }
+ action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
return res;
}
+ head = compound_head(p);
+ lock_page(head);
+
+ if (hwpoison_filter(p)) {
+ ClearPageHWPoison(head);
+ res = -EOPNOTSUPP;
+ goto out;
+ }
+
num_poisoned_pages_inc();
- if (!(flags & MF_COUNT_INCREASED)) {
- res = get_hwpoison_page(p, flags);
- if (!res) {
- lock_page(head);
- if (hwpoison_filter(p)) {
- if (TestClearPageHWPoison(head))
- num_poisoned_pages_dec();
- unlock_page(head);
- return -EOPNOTSUPP;
- }
- unlock_page(head);
- res = MF_FAILED;
- if (__page_handle_poison(p)) {
- page_ref_inc(p);
- res = MF_RECOVERED;
- }
- action_result(pfn, MF_MSG_FREE_HUGE, res);
- return res == MF_RECOVERED ? 0 : -EBUSY;
- } else if (res < 0) {
- action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
- return -EBUSY;
+ /*
+	 * Handle a free hugepage. The possible race with hugepage allocation
+	 * or demotion is prevented by the PageHWPoison flag.
+ */
+ if (res == 0) {
+ unlock_page(head);
+ res = MF_FAILED;
+ if (__page_handle_poison(p)) {
+ page_ref_inc(p);
+ res = MF_RECOVERED;
}
+ action_result(pfn, MF_MSG_FREE_HUGE, res);
+ return res == MF_RECOVERED ? 0 : -EBUSY;
}
- lock_page(head);
-
/*
* The page could have changed compound pages due to race window.
* If this happens just bail out.
@@ -1554,14 +1617,6 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
page_flags = head->flags;
- if (hwpoison_filter(p)) {
- if (TestClearPageHWPoison(head))
- num_poisoned_pages_dec();
- put_page(p);
- res = -EOPNOTSUPP;
- goto out;
- }
-
/*
* TODO: hwpoison for pud-sized hugetlb doesn't work right now, so
* simply disable it. In order to make it work properly, we need
@@ -1588,6 +1643,12 @@ out:
unlock_page(head);
return res;
}
+#else
+static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
+{
+ return 0;
+}
+#endif
static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
struct dev_pagemap *pgmap)
@@ -1712,6 +1773,7 @@ int memory_failure(unsigned long pfn, int flags)
int res = 0;
unsigned long page_flags;
bool retry = true;
+ int hugetlb = 0;
if (!sysctl_memory_failure_recovery)
panic("Memory failure on page %lx", pfn);
@@ -1739,10 +1801,9 @@ int memory_failure(unsigned long pfn, int flags)
}
try_again:
- if (PageHuge(p)) {
- res = memory_failure_hugetlb(pfn, flags);
+ res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
+ if (hugetlb)
goto unlock_mutex;
- }
if (TestSetPageHWPoison(p)) {
pr_err("Memory failure: %#lx: already hardware poisoned\n",
@@ -1800,6 +1861,19 @@ try_again:
if (PageTransHuge(hpage)) {
/*
+ * Bail out before SetPageHasHWPoisoned() if hpage is
+ * huge_zero_page, although PG_has_hwpoisoned is not
+ * checked in set_huge_zero_page().
+ *
+ * TODO: Handle memory failure of huge_zero_page thoroughly.
+ */
+ if (is_huge_zero_page(hpage)) {
+ action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
+ res = -EBUSY;
+ goto unlock_mutex;
+ }
+
+ /*
* The flag must be set after the refcount is bumped
* otherwise it may race with THP split.
* And the flag can't be set in get_hwpoison_page() since
diff --git a/mm/mmap.c b/mm/mmap.c
index 3aa839f81e63..313b57d55a63 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2117,14 +2117,6 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
return addr;
}
-#ifndef arch_get_mmap_end
-#define arch_get_mmap_end(addr) (TASK_SIZE)
-#endif
-
-#ifndef arch_get_mmap_base
-#define arch_get_mmap_base(addr, base) (base)
-#endif
-
/* Get an address range which is currently unmapped.
* For shmat() with addr=0.
*
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 459d195d2ff6..f45ff1b7626a 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -1036,6 +1036,18 @@ int mmu_interval_notifier_insert_locked(
}
EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
+static bool
+mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
+ unsigned long seq)
+{
+ bool ret;
+
+ spin_lock(&subscriptions->lock);
+ ret = subscriptions->invalidate_seq != seq;
+ spin_unlock(&subscriptions->lock);
+ return ret;
+}
+
/**
* mmu_interval_notifier_remove - Remove a interval notifier
* @interval_sub: Interval subscription to unregister
@@ -1083,7 +1095,7 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
lock_map_release(&__mmu_notifier_invalidate_range_start_map);
if (seq)
wait_event(subscriptions->wq,
- READ_ONCE(subscriptions->invalidate_seq) != seq);
+ mmu_interval_seq_released(subscriptions, seq));
/* pairs with mmgrab in mmu_interval_notifier_insert() */
mmdrop(mm);
diff --git a/mm/nommu.c b/mm/nommu.c
index 55a9e48a7a02..9d7afc2d959e 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -226,6 +226,8 @@ void *vmalloc(unsigned long size)
}
EXPORT_SYMBOL(vmalloc);
+void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc);
+
/*
* vzalloc - allocate virtually contiguous memory with zero fill
*
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 7ec38194f8e1..49d7df39b02d 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -632,7 +632,7 @@ done:
*/
set_bit(MMF_OOM_SKIP, &mm->flags);
- /* Drop a reference taken by wake_oom_reaper */
+ /* Drop a reference taken by queue_oom_reaper */
put_task_struct(tsk);
}
@@ -644,12 +644,12 @@ static int oom_reaper(void *unused)
struct task_struct *tsk = NULL;
wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
- spin_lock(&oom_reaper_lock);
+ spin_lock_irq(&oom_reaper_lock);
if (oom_reaper_list != NULL) {
tsk = oom_reaper_list;
oom_reaper_list = tsk->oom_reaper_list;
}
- spin_unlock(&oom_reaper_lock);
+ spin_unlock_irq(&oom_reaper_lock);
if (tsk)
oom_reap_task(tsk);
@@ -658,22 +658,48 @@ static int oom_reaper(void *unused)
return 0;
}
-static void wake_oom_reaper(struct task_struct *tsk)
+static void wake_oom_reaper(struct timer_list *timer)
{
- /* mm is already queued? */
- if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
- return;
+ struct task_struct *tsk = container_of(timer, struct task_struct,
+ oom_reaper_timer);
+ struct mm_struct *mm = tsk->signal->oom_mm;
+ unsigned long flags;
- get_task_struct(tsk);
+ /* The victim managed to terminate on its own - see exit_mmap */
+ if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
+ put_task_struct(tsk);
+ return;
+ }
- spin_lock(&oom_reaper_lock);
+ spin_lock_irqsave(&oom_reaper_lock, flags);
tsk->oom_reaper_list = oom_reaper_list;
oom_reaper_list = tsk;
- spin_unlock(&oom_reaper_lock);
+ spin_unlock_irqrestore(&oom_reaper_lock, flags);
trace_wake_reaper(tsk->pid);
wake_up(&oom_reaper_wait);
}
+/*
+ * Give the OOM victim time to exit naturally before invoking the OOM reaper.
+ * The timer's timeout is arbitrary... the longer it is, the longer the worst
+ * case scenario for the OOM can take. If it is too small, the oom_reaper can
+ * get in the way and release resources needed by the process exit path.
+ * e.g. The futex robust list can sit in Anon|Private memory that gets reaped
+ * before the exit path is able to wake the futex waiters.
+ */
+#define OOM_REAPER_DELAY (2*HZ)
+static void queue_oom_reaper(struct task_struct *tsk)
+{
+ /* mm is already queued? */
+ if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
+ return;
+
+ get_task_struct(tsk);
+ timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
+ tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
+ add_timer(&tsk->oom_reaper_timer);
+}
+
static int __init oom_init(void)
{
oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
@@ -681,7 +707,7 @@ static int __init oom_init(void)
}
subsys_initcall(oom_init)
#else
-static inline void wake_oom_reaper(struct task_struct *tsk)
+static inline void queue_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */
@@ -932,7 +958,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
rcu_read_unlock();
if (can_oom_reap)
- wake_oom_reaper(victim);
+ queue_oom_reaper(victim);
mmdrop(mm);
put_task_struct(victim);
@@ -968,7 +994,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
task_lock(victim);
if (task_will_free_mem(victim)) {
mark_oom_victim(victim);
- wake_oom_reaper(victim);
+ queue_oom_reaper(victim);
task_unlock(victim);
put_task_struct(victim);
return;
@@ -1067,7 +1093,7 @@ bool out_of_memory(struct oom_control *oc)
*/
if (task_will_free_mem(current)) {
mark_oom_victim(current);
- wake_oom_reaper(current);
+ queue_oom_reaper(current);
return true;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6e5b4488a0c5..0e42038382c1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6131,7 +6131,7 @@ static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
do {
zone_type--;
zone = pgdat->node_zones + zone_type;
- if (managed_zone(zone)) {
+ if (populated_zone(zone)) {
zoneref_set_zone(zone, &zonerefs[nr_zones++]);
check_highest_zone(zone_type);
}
@@ -8919,7 +8919,7 @@ void *__init alloc_large_system_hash(const char *tablename,
table = memblock_alloc_raw(size,
SMP_CACHE_BYTES);
} else if (get_order(size) >= MAX_ORDER || hashdist) {
- table = __vmalloc(size, gfp_flags);
+ table = vmalloc_huge(size, gfp_flags);
virt = true;
if (table)
huge = is_vm_area_hugepages(table);
diff --git a/mm/page_io.c b/mm/page_io.c
index b417f000b49e..89fbf3cae30f 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -51,54 +51,6 @@ void end_swap_bio_write(struct bio *bio)
bio_put(bio);
}
-static void swap_slot_free_notify(struct page *page)
-{
- struct swap_info_struct *sis;
- struct gendisk *disk;
- swp_entry_t entry;
-
- /*
- * There is no guarantee that the page is in swap cache - the software
- * suspend code (at least) uses end_swap_bio_read() against a non-
- * swapcache page. So we must check PG_swapcache before proceeding with
- * this optimization.
- */
- if (unlikely(!PageSwapCache(page)))
- return;
-
- sis = page_swap_info(page);
- if (data_race(!(sis->flags & SWP_BLKDEV)))
- return;
-
- /*
- * The swap subsystem performs lazy swap slot freeing,
- * expecting that the page will be swapped out again.
- * So we can avoid an unnecessary write if the page
- * isn't redirtied.
- * This is good for real swap storage because we can
- * reduce unnecessary I/O and enhance wear-leveling
- * if an SSD is used as the as swap device.
- * But if in-memory swap device (eg zram) is used,
- * this causes a duplicated copy between uncompressed
- * data in VM-owned memory and compressed data in
- * zram-owned memory. So let's free zram-owned memory
- * and make the VM-owned decompressed page *dirty*,
- * so the page should be swapped out somewhere again if
- * we again wish to reclaim it.
- */
- disk = sis->bdev->bd_disk;
- entry.val = page_private(page);
- if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
- unsigned long offset;
-
- offset = swp_offset(entry);
-
- SetPageDirty(page);
- disk->fops->swap_slot_free_notify(sis->bdev,
- offset);
- }
-}
-
static void end_swap_bio_read(struct bio *bio)
{
struct page *page = bio_first_page_all(bio);
@@ -114,7 +66,6 @@ static void end_swap_bio_read(struct bio *bio)
}
SetPageUptodate(page);
- swap_slot_free_notify(page);
out:
unlock_page(page);
WRITE_ONCE(bio->bi_private, NULL);
@@ -394,11 +345,6 @@ int swap_readpage(struct page *page, bool synchronous)
if (sis->flags & SWP_SYNCHRONOUS_IO) {
ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
if (!ret) {
- if (trylock_page(page)) {
- swap_slot_free_notify(page);
- unlock_page(page);
- }
-
count_vm_event(PSWPIN);
goto out;
}
diff --git a/mm/secretmem.c b/mm/secretmem.c
index 098638d3b8a4..3b3cf2892b6a 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -158,6 +158,22 @@ const struct address_space_operations secretmem_aops = {
.isolate_page = secretmem_isolate_page,
};
+static int secretmem_setattr(struct user_namespace *mnt_userns,
+ struct dentry *dentry, struct iattr *iattr)
+{
+ struct inode *inode = d_inode(dentry);
+ unsigned int ia_valid = iattr->ia_valid;
+
+ if ((ia_valid & ATTR_SIZE) && inode->i_size)
+ return -EINVAL;
+
+ return simple_setattr(mnt_userns, dentry, iattr);
+}
+
+static const struct inode_operations secretmem_iops = {
+ .setattr = secretmem_setattr,
+};
+
static struct vfsmount *secretmem_mnt;
static struct file *secretmem_file_create(unsigned long flags)
@@ -177,6 +193,7 @@ static struct file *secretmem_file_create(unsigned long flags)
mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
mapping_set_unevictable(inode->i_mapping);
+ inode->i_op = &secretmem_iops;
inode->i_mapping->a_ops = &secretmem_aops;
/* pretend we are a normal file with zero size */
diff --git a/mm/shmem.c b/mm/shmem.c
index 529c9ad3e926..4b2fea33158e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2513,7 +2513,6 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
pgoff_t end_index;
unsigned long nr, ret;
loff_t i_size = i_size_read(inode);
- bool got_page;
end_index = i_size >> PAGE_SHIFT;
if (index > end_index)
@@ -2570,24 +2569,34 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
*/
if (!offset)
mark_page_accessed(page);
- got_page = true;
+ /*
+ * Ok, we have the page, and it's up-to-date, so
+ * now we can copy it to user space...
+ */
+ ret = copy_page_to_iter(page, offset, nr, to);
+ put_page(page);
+
+ } else if (iter_is_iovec(to)) {
+ /*
+ * Copy to user tends to be so well optimized, but
+ * clear_user() not so much, that it is noticeably
+ * faster to copy the zero page instead of clearing.
+ */
+ ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
} else {
- page = ZERO_PAGE(0);
- got_page = false;
+ /*
+ * But submitting the same page twice in a row to
+ * splice() - or others? - can result in confusion:
+ * so don't attempt that optimization on pipes etc.
+ */
+ ret = iov_iter_zero(nr, to);
}
- /*
- * Ok, we have the page, and it's up-to-date, so
- * now we can copy it to user space...
- */
- ret = copy_page_to_iter(page, offset, nr, to);
retval += ret;
offset += ret;
index += offset >> PAGE_SHIFT;
offset &= ~PAGE_MASK;
- if (got_page)
- put_page(page);
if (!iov_iter_count(to))
break;
if (ret < nr) {
diff --git a/mm/slab.c b/mm/slab.c
index b04e40078bdf..0edb474edef1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3665,7 +3665,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
#endif /* CONFIG_NUMA */
#ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
struct kmem_cache *cachep;
unsigned int objnr;
diff --git a/mm/slab.h b/mm/slab.h
index fd7ae2024897..95eb34174c1b 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -868,7 +868,7 @@ struct kmem_obj_info {
void *kp_stack[KS_ADDRS_COUNT];
void *kp_free_stack[KS_ADDRS_COUNT];
};
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif
#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 6ee64d6208b3..2b3206a2c3b5 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -555,6 +555,13 @@ bool kmem_valid_obj(void *object)
}
EXPORT_SYMBOL_GPL(kmem_valid_obj);
+static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+ if (__kfence_obj_info(kpp, object, slab))
+ return;
+ __kmem_obj_info(kpp, object, slab);
+}
+
/**
* kmem_dump_obj - Print available slab provenance information
* @object: slab object for which to find provenance information.
@@ -590,6 +597,8 @@ void kmem_dump_obj(void *object)
pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
else
pr_cont(" slab%s", cp);
+ if (is_kfence_address(object))
+ pr_cont(" (kfence)");
if (kp.kp_objp)
pr_cont(" start %px", kp.kp_objp);
if (kp.kp_data_offset)
diff --git a/mm/slob.c b/mm/slob.c
index dfa6808dff36..40ea6e2d4ccd 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -463,7 +463,7 @@ out:
}
#ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
kpp->kp_ptr = object;
kpp->kp_slab = slab;
diff --git a/mm/slub.c b/mm/slub.c
index 74d92aa4a3a2..ed5c2c03a47a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4312,7 +4312,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
}
#ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
void *base;
int __maybe_unused i;
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 0cb8e5ef1713..e9bb6db002aa 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -72,12 +72,15 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
_dst_pte = pte_mkdirty(_dst_pte);
if (page_in_cache && !vm_shared)
writable = false;
- if (writable) {
- if (wp_copy)
- _dst_pte = pte_mkuffd_wp(_dst_pte);
- else
- _dst_pte = pte_mkwrite(_dst_pte);
- }
+
+ /*
+ * Always mark a PTE as write-protected when needed, regardless of
+ * VM_WRITE, which the user might change.
+ */
+ if (wp_copy)
+ _dst_pte = pte_mkuffd_wp(_dst_pte);
+ else if (writable)
+ _dst_pte = pte_mkwrite(_dst_pte);
dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
diff --git a/mm/util.c b/mm/util.c
index 54e5e761a9a9..3492a9e81aa3 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -592,8 +592,15 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
return NULL;
}
- return __vmalloc_node(size, 1, flags, node,
- __builtin_return_address(0));
+ /*
+ * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
+ * since the callers already cannot assume anything
+ * about the resulting pointer, and cannot play
+ * protection games.
+ */
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+ flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
+ node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e163372d3967..cadfbb5155ea 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1671,17 +1671,6 @@ static DEFINE_MUTEX(vmap_purge_lock);
/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);
-#ifdef CONFIG_X86_64
-/*
- * called before a call to iounmap() if the caller wants vm_area_struct's
- * immediately freed.
- */
-void set_iounmap_nonlazy(void)
-{
- atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
-}
-#endif /* CONFIG_X86_64 */
-
/*
* Purges all lazily-freed vmap areas.
*/
@@ -2664,15 +2653,18 @@ static void __vunmap(const void *addr, int deallocate_pages)
vm_remove_mappings(area, deallocate_pages);
if (deallocate_pages) {
- unsigned int page_order = vm_area_page_order(area);
- int i, step = 1U << page_order;
+ int i;
- for (i = 0; i < area->nr_pages; i += step) {
+ for (i = 0; i < area->nr_pages; i++) {
struct page *page = area->pages[i];
BUG_ON(!page);
- mod_memcg_page_state(page, MEMCG_VMALLOC, -step);
- __free_pages(page, page_order);
+ mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
+ /*
+			 * High-order allocs for huge vmallocs are split, so they
+			 * can be freed as an array of order-0 allocations.
+ */
+ __free_pages(page, 0);
cond_resched();
}
atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
@@ -2925,12 +2917,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
if (nr != nr_pages_request)
break;
}
- } else
- /*
- * Compound pages required for remap_vmalloc_page if
- * high-order pages.
- */
- gfp |= __GFP_COMP;
+ }
/* High-order pages or fallback path if "bulk" fails. */
@@ -2944,6 +2931,15 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
page = alloc_pages_node(nid, gfp, order);
if (unlikely(!page))
break;
+ /*
+ * Higher order allocations must be able to be treated as
+		 * independent small pages by callers (as they can with
+ * small-page vmallocs). Some drivers do their own refcounting
+ * on vmalloc_to_page() pages, some use page->mapping,
+ * page->lru, etc.
+ */
+ if (order)
+ split_page(page, order);
/*
* Careful, we allocate and map page-order pages, but
@@ -3003,11 +2999,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
if (gfp_mask & __GFP_ACCOUNT) {
- int i, step = 1U << page_order;
+ int i;
- for (i = 0; i < area->nr_pages; i += step)
- mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC,
- step);
+ for (i = 0; i < area->nr_pages; i++)
+ mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
}
/*
@@ -3106,7 +3101,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
return NULL;
}
- if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
+ if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
unsigned long size_per_node;
/*
@@ -3273,21 +3268,24 @@ void *vmalloc(unsigned long size)
EXPORT_SYMBOL(vmalloc);
/**
- * vmalloc_no_huge - allocate virtually contiguous memory using small pages
- * @size: allocation size
+ * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
+ * @size: allocation size
+ * @gfp_mask: flags for the page level allocator
*
- * Allocate enough non-huge pages to cover @size from the page level
+ * Allocate enough pages to cover @size from the page level
* allocator and map them into contiguous kernel virtual space.
+ * If @size is greater than or equal to PMD_SIZE, allow using
+ * huge pages for the memory.
*
* Return: pointer to the allocated memory or %NULL on error
*/
-void *vmalloc_no_huge(unsigned long size)
+void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
{
return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
- GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP,
+ gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
NUMA_NO_NODE, __builtin_return_address(0));
}
-EXPORT_SYMBOL(vmalloc_no_huge);
+EXPORT_SYMBOL_GPL(vmalloc_huge);
/**
* vzalloc - allocate virtually contiguous memory with zero fill
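The vmalloc hunks above rest on one invariant: a high-order allocation that has been run through split_page() becomes 2^order independent order-0 pages, each with its own refcount, so drivers that refcount vmalloc_to_page() results keep working. A minimal sketch of that pattern, assuming kernel context; alloc_split_array() and free_split_array() are hypothetical helpers, and gfp must not carry __GFP_COMP or split_page() is not permitted:

    /* Sketch: allocate 2^order pages, then manage them as singles. */
    static struct page **alloc_split_array(gfp_t gfp, unsigned int order)
    {
            unsigned int i, nr = 1U << order;
            struct page **pages;
            struct page *page;

            pages = kmalloc_array(nr, sizeof(*pages), gfp);
            if (!pages)
                    return NULL;

            page = alloc_pages(gfp, order);  /* non-compound high-order block */
            if (!page) {
                    kfree(pages);
                    return NULL;
            }

            split_page(page, order);         /* now nr independent order-0 pages */
            for (i = 0; i < nr; i++)
                    pages[i] = page + i;
            return pages;
    }

    static void free_split_array(struct page **pages, unsigned int nr)
    {
            unsigned int i;

            /* Each page may be freed individually, in any order. */
            for (i = 0; i < nr; i++)
                    __free_pages(pages[i], 0);
            kfree(pages);
    }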
diff --git a/mm/workingset.c b/mm/workingset.c
index 8a3828acc0bf..592569a8974c 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -355,7 +355,7 @@ void workingset_refault(struct folio *folio, void *shadow)
mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);
- mem_cgroup_flush_stats();
+ mem_cgroup_flush_stats_delayed();
/*
* Compare the distance to the existing workingset size. We
* don't activate pages that couldn't stay resident even if
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 84312c836549..fe803bee419a 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -670,7 +670,7 @@ static void le_conn_timeout(struct work_struct *work)
/* Disable LE Advertising */
le_disable_advertising(hdev);
hci_dev_lock(hdev);
- hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
+ hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
hci_dev_unlock(hdev);
return;
}
@@ -873,7 +873,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
EXPORT_SYMBOL(hci_get_route);
/* This function requires the caller holds hdev->lock */
-void hci_le_conn_failed(struct hci_conn *conn, u8 status)
+static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
struct hci_dev *hdev = conn->hdev;
struct hci_conn_params *params;
@@ -886,8 +886,6 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
params->conn = NULL;
}
- conn->state = BT_CLOSED;
-
/* If the status indicates successful cancellation of
* the attempt (i.e. Unknown Connection Id) there's no point of
* notifying failure since we'll go back to keep trying to
@@ -899,10 +897,6 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
mgmt_connect_failed(hdev, &conn->dst, conn->type,
conn->dst_type, status);
- hci_connect_cfm(conn, status);
-
- hci_conn_del(conn);
-
/* Since we may have temporarily stopped the background scanning in
* favor of connection establishment, we should restart it.
*/
@@ -914,6 +908,28 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
hci_enable_advertising(hdev);
}
+/* This function requires the caller holds hdev->lock */
+void hci_conn_failed(struct hci_conn *conn, u8 status)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
+
+ switch (conn->type) {
+ case LE_LINK:
+ hci_le_conn_failed(conn, status);
+ break;
+ case ACL_LINK:
+ mgmt_connect_failed(hdev, &conn->dst, conn->type,
+ conn->dst_type, status);
+ break;
+ }
+
+ conn->state = BT_CLOSED;
+ hci_connect_cfm(conn, status);
+ hci_conn_del(conn);
+}
+
static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
struct hci_conn *conn = data;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index abaabfae19cc..66451661283c 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2834,7 +2834,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
bt_dev_dbg(hdev, "status 0x%2.2x", status);
/* All connection failure handling is taken care of by the
- * hci_le_conn_failed function which is triggered by the HCI
+ * hci_conn_failed function which is triggered by the HCI
* request completion callbacks used for connecting.
*/
if (status)
@@ -2859,7 +2859,7 @@ static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
bt_dev_dbg(hdev, "status 0x%2.2x", status);
/* All connection failure handling is taken care of by the
- * hci_le_conn_failed function which is triggered by the HCI
+ * hci_conn_failed function which is triggered by the HCI
* request completion callbacks used for connecting.
*/
if (status)
@@ -3067,18 +3067,20 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
{
struct hci_ev_conn_complete *ev = data;
struct hci_conn *conn;
+ u8 status = ev->status;
- if (__le16_to_cpu(ev->handle) > HCI_CONN_HANDLE_MAX) {
- bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for invalid handle");
- return;
- }
-
- bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
if (!conn) {
+ /* If the status indicates an error and no connection is pending,
+ * just unlock as there is nothing to clean up.
+ */
+ if (ev->status)
+ goto unlock;
+
/* Connection may not exist if auto-connected. Check the bredr
* allowlist to see if this device is allowed to auto connect.
* If link is an ACL type, create a connection class
@@ -3122,8 +3124,14 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
goto unlock;
}
- if (!ev->status) {
+ if (!status) {
conn->handle = __le16_to_cpu(ev->handle);
+ if (conn->handle > HCI_CONN_HANDLE_MAX) {
+ bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
+ conn->handle, HCI_CONN_HANDLE_MAX);
+ status = HCI_ERROR_INVALID_PARAMETERS;
+ goto done;
+ }
if (conn->type == ACL_LINK) {
conn->state = BT_CONFIG;
@@ -3164,19 +3172,14 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
&cp);
}
- } else {
- conn->state = BT_CLOSED;
- if (conn->type == ACL_LINK)
- mgmt_connect_failed(hdev, &conn->dst, conn->type,
- conn->dst_type, ev->status);
}
if (conn->type == ACL_LINK)
hci_sco_setup(conn, ev->status);
- if (ev->status) {
- hci_connect_cfm(conn, ev->status);
- hci_conn_del(conn);
+done:
+ if (status) {
+ hci_conn_failed(conn, status);
} else if (ev->link_type == SCO_LINK) {
switch (conn->setting & SCO_AIRMODE_MASK) {
case SCO_AIRMODE_CVSD:
@@ -3185,7 +3188,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
break;
}
- hci_connect_cfm(conn, ev->status);
+ hci_connect_cfm(conn, status);
}
unlock:
@@ -4676,6 +4679,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
{
struct hci_ev_sync_conn_complete *ev = data;
struct hci_conn *conn;
+ u8 status = ev->status;
switch (ev->link_type) {
case SCO_LINK:
@@ -4690,12 +4694,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
return;
}
- if (__le16_to_cpu(ev->handle) > HCI_CONN_HANDLE_MAX) {
- bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete for invalid handle");
- return;
- }
-
- bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
hci_dev_lock(hdev);
@@ -4729,9 +4728,17 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
goto unlock;
}
- switch (ev->status) {
+ switch (status) {
case 0x00:
conn->handle = __le16_to_cpu(ev->handle);
+ if (conn->handle > HCI_CONN_HANDLE_MAX) {
+ bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
+ conn->handle, HCI_CONN_HANDLE_MAX);
+ status = HCI_ERROR_INVALID_PARAMETERS;
+ conn->state = BT_CLOSED;
+ break;
+ }
+
conn->state = BT_CONNECTED;
conn->type = ev->link_type;
@@ -4775,8 +4782,8 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
}
}
- hci_connect_cfm(conn, ev->status);
- if (ev->status)
+ hci_connect_cfm(conn, status);
+ if (status)
hci_conn_del(conn);
unlock:
@@ -5527,11 +5534,6 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
struct smp_irk *irk;
u8 addr_type;
- if (handle > HCI_CONN_HANDLE_MAX) {
- bt_dev_err(hdev, "Ignoring HCI_LE_Connection_Complete for invalid handle");
- return;
- }
-
hci_dev_lock(hdev);
/* All controllers implicitly stop advertising in the event of a
@@ -5541,6 +5543,12 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
conn = hci_lookup_le_connect(hdev);
if (!conn) {
+ /* If the status indicates an error and no connection is pending,
+ * just unlock as there is nothing to clean up.
+ */
+ if (status)
+ goto unlock;
+
conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
if (!conn) {
bt_dev_err(hdev, "no memory for new connection");
@@ -5603,8 +5611,14 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
+ if (handle > HCI_CONN_HANDLE_MAX) {
+ bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
+ HCI_CONN_HANDLE_MAX);
+ status = HCI_ERROR_INVALID_PARAMETERS;
+ }
+
if (status) {
- hci_le_conn_failed(conn, status);
+ hci_conn_failed(conn, status);
goto unlock;
}
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 8f4c5698913d..13600bf120b0 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -4408,12 +4408,21 @@ static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
static int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
u8 reason)
{
+ int err;
+
switch (conn->state) {
case BT_CONNECTED:
case BT_CONFIG:
return hci_disconnect_sync(hdev, conn, reason);
case BT_CONNECT:
- return hci_connect_cancel_sync(hdev, conn);
+ err = hci_connect_cancel_sync(hdev, conn);
+ /* Cleanup hci_conn object if it cannot be cancelled as it
+ * likely means the controller and host stack are out of sync.
+ */
+ if (err)
+ hci_conn_failed(conn, err);
+
+ return err;
case BT_CONNECT2:
return hci_reject_conn_sync(hdev, conn, reason);
default:
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index e7b9c2636d10..af709c182674 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -108,6 +108,7 @@ struct xdp_test_data {
struct page_pool *pp;
struct xdp_frame **frames;
struct sk_buff **skbs;
+ struct xdp_mem_info mem;
u32 batch_size;
u32 frame_cnt;
};
@@ -147,7 +148,6 @@ static void xdp_test_run_init_page(struct page *page, void *arg)
static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
{
- struct xdp_mem_info mem = {};
struct page_pool *pp;
int err = -ENOMEM;
struct page_pool_params pp_params = {
@@ -174,7 +174,7 @@ static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
}
/* will copy 'mem.id' into pp->xdp_mem_id */
- err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pp);
+ err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
if (err)
goto err_mmodel;
@@ -202,6 +202,7 @@ err_skbs:
static void xdp_test_run_teardown(struct xdp_test_data *xdp)
{
+ xdp_unreg_mem_model(&xdp->mem);
page_pool_destroy(xdp->pp);
kfree(xdp->frames);
kfree(xdp->skbs);
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 8cc44c367231..18affda2b522 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -353,6 +353,8 @@ static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
attr.orig_dev = br_dev;
vg = br_vlan_group(br);
+ if (!vg)
+ return 0;
list_for_each_entry(v, &vg->vlan_list, vlist) {
if (v->msti) {
diff --git a/net/can/isotp.c b/net/can/isotp.c
index bafb0fb5f0e0..ff5d7870294e 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -906,6 +906,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
struct canfd_frame *cf;
int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
int wait_tx_done = (so->opt.flags & CAN_ISOTP_WAIT_TX_DONE) ? 1 : 0;
+ s64 hrtimer_sec = 0;
int off;
int err;
@@ -1004,7 +1005,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
isotp_create_fframe(cf, so, ae);
/* start timeout for FC */
- hrtimer_start(&so->txtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
+ hrtimer_sec = 1;
+ hrtimer_start(&so->txtimer, ktime_set(hrtimer_sec, 0),
+ HRTIMER_MODE_REL_SOFT);
}
/* send the first or only CAN frame */
@@ -1017,6 +1020,11 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (err) {
pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
__func__, ERR_PTR(err));
+
+ /* no transmission -> no timeout monitoring */
+ if (hrtimer_sec)
+ hrtimer_cancel(&so->txtimer);
+
goto err_out_drop;
}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 1c5815530e0d..83eb97c94e83 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2385,7 +2385,11 @@ again:
if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
err = -ENOSPC;
} else {
- pr_warn_ratelimited("FULL or reached pool quota\n");
+ if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL))
+ pr_warn_ratelimited("cluster is full (osdmap FULL)\n");
+ else
+ pr_warn_ratelimited("pool %lld is full or reached quota\n",
+ req->r_t.base_oloc.pool);
req->r_t.paused = true;
maybe_request_map(osdc);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 8c6c08446556..1461c2d9dec8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -10304,7 +10304,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
}
EXPORT_SYMBOL(netdev_stats_to_stats64);
-struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev)
+struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev)
{
struct net_device_core_stats __percpu *p;
@@ -10315,11 +10315,7 @@ struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev)
free_percpu(p);
/* This READ_ONCE() pairs with the cmpxchg() above */
- p = READ_ONCE(dev->core_stats);
- if (!p)
- return NULL;
-
- return this_cpu_ptr(p);
+ return READ_ONCE(dev->core_stats);
}
EXPORT_SYMBOL(netdev_core_stats_alloc);
@@ -10356,9 +10352,9 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
for_each_possible_cpu(i) {
core_stats = per_cpu_ptr(p, i);
- storage->rx_dropped += local_read(&core_stats->rx_dropped);
- storage->tx_dropped += local_read(&core_stats->tx_dropped);
- storage->rx_nohandler += local_read(&core_stats->rx_nohandler);
+ storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
+ storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
+ storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
}
}
return storage;
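The dev_get_stats() hunk above trades local_t for plain fields read with READ_ONCE(): each CPU only writes its own slot, so the reader merely needs tear-free loads of a possibly stale snapshot, which is acceptable for statistics. A minimal sketch of the reader side, assuming kernel context; demo_stats and demo_sum() are hypothetical names:

    struct demo_stats {
            unsigned long rx_dropped;       /* written only by the owning CPU */
    };

    static unsigned long demo_sum(struct demo_stats __percpu *p)
    {
            unsigned long sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)      /* snapshot, not a consistent sum */
                    sum += READ_ONCE(per_cpu_ptr(p, cpu)->rx_dropped);
            return sum;
    }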
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 03b6e649c428..6f7ec72016dc 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1032,7 +1032,7 @@ bool __skb_flow_dissect(const struct net *net,
key_eth_addrs = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_ETH_ADDRS,
target_container);
- memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
+ memcpy(key_eth_addrs, eth, sizeof(*key_eth_addrs));
}
proto_again:
@@ -1183,6 +1183,7 @@ proto_again:
VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
key_vlan->vlan_tpid = saved_vlan_tpid;
+ key_vlan->vlan_eth_type = proto;
}
fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 349480ef68a5..8b6b5e72b217 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -159,10 +159,8 @@ static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
return dst->lwtstate->orig_output(net, sk, skb);
}
-static int xmit_check_hhlen(struct sk_buff *skb)
+static int xmit_check_hhlen(struct sk_buff *skb, int hh_len)
{
- int hh_len = skb_dst(skb)->dev->hard_header_len;
-
if (skb_headroom(skb) < hh_len) {
int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
@@ -274,6 +272,7 @@ static int bpf_xmit(struct sk_buff *skb)
bpf = bpf_lwt_lwtunnel(dst->lwtstate);
if (bpf->xmit.prog) {
+ int hh_len = dst->dev->hard_header_len;
__be16 proto = skb->protocol;
int ret;
@@ -291,7 +290,7 @@ static int bpf_xmit(struct sk_buff *skb)
/* If the header was expanded, headroom might be too
* small for L2 header to come, expand as needed.
*/
- ret = xmit_check_hhlen(skb);
+ ret = xmit_check_hhlen(skb, hh_len);
if (unlikely(ret))
return ret;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 159c9c61e6af..d1381ea6d52e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -5242,6 +5242,8 @@ static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
*prividx = attr_id_l3_stats;
size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
+ if (!size_l3)
+ goto skip_l3_stats;
attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
IFLA_OFFLOAD_XSTATS_UNSPEC);
if (!attr)
@@ -5253,6 +5255,7 @@ static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
return err;
have_data = true;
+skip_l3_stats:
*prividx = 0;
}
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index ca6af86964bc..cf933225df32 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -562,7 +562,6 @@ static void dsa_port_teardown(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
struct dsa_switch *ds = dp->ds;
- struct net_device *slave;
if (!dp->setup)
return;
@@ -584,11 +583,9 @@ static void dsa_port_teardown(struct dsa_port *dp)
dsa_port_link_unregister_of(dp);
break;
case DSA_PORT_TYPE_USER:
- slave = dp->slave;
-
- if (slave) {
+ if (dp->slave) {
+ dsa_slave_destroy(dp->slave);
dp->slave = NULL;
- dsa_slave_destroy(slave);
}
break;
}
@@ -1147,17 +1144,17 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
if (err)
goto teardown_cpu_ports;
- err = dsa_tree_setup_master(dst);
+ err = dsa_tree_setup_ports(dst);
if (err)
goto teardown_switches;
- err = dsa_tree_setup_ports(dst);
+ err = dsa_tree_setup_master(dst);
if (err)
- goto teardown_master;
+ goto teardown_ports;
err = dsa_tree_setup_lags(dst);
if (err)
- goto teardown_ports;
+ goto teardown_master;
dst->setup = true;
@@ -1165,10 +1162,10 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
return 0;
-teardown_ports:
- dsa_tree_teardown_ports(dst);
teardown_master:
dsa_tree_teardown_master(dst);
+teardown_ports:
+ dsa_tree_teardown_ports(dst);
teardown_switches:
dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
@@ -1186,10 +1183,10 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
dsa_tree_teardown_lags(dst);
- dsa_tree_teardown_ports(dst);
-
dsa_tree_teardown_master(dst);
+ dsa_tree_teardown_ports(dst);
+
dsa_tree_teardown_switches(dst);
dsa_tree_teardown_cpu_ports(dst);
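The dsa2 reordering shows the discipline behind goto-based unwinding: the error labels must run in exact reverse order of the setup steps that succeeded, so swapping two setup calls (ports before master here) also means swapping their labels and the teardown path. A minimal sketch of the idiom; setup_a/b/c() and undo_a/b() are hypothetical:

    static int demo_setup(void)
    {
            int err;

            err = setup_a();
            if (err)
                    return err;

            err = setup_b();
            if (err)
                    goto err_a;

            err = setup_c();
            if (err)
                    goto err_b;

            return 0;

    err_b:
            undo_b();       /* undo in reverse order of setup */
    err_a:
            undo_a();
            return err;
    }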
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 32d472a82241..cdc56ba11f52 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -1620,8 +1620,10 @@ int dsa_port_link_register_of(struct dsa_port *dp)
if (ds->ops->phylink_mac_link_down)
ds->ops->phylink_mac_link_down(ds, port,
MLO_AN_FIXED, PHY_INTERFACE_MODE_NA);
+ of_node_put(phy_np);
return dsa_port_phylink_register(dp);
}
+ of_node_put(phy_np);
return 0;
}
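The dsa_port_link_register_of() fix balances the phandle reference taken earlier in the function (not visible in this hunk): of_parse_phandle() returns the node with its refcount elevated, and every exit path owes a matching of_node_put(), which tolerates NULL. A minimal sketch of the balanced pattern, assuming kernel context; demo_probe() and do_something() are hypothetical:

    static int demo_probe(struct device_node *np)
    {
            struct device_node *phy = of_parse_phandle(np, "phy-handle", 0);
            int err = 0;

            if (phy)
                    err = do_something(phy);

            of_node_put(phy);       /* balance the get on every path */
            return err;
    }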
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 41c69a6e7854..8022d50584db 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -285,7 +285,7 @@ static void dsa_port_manage_cpu_flood(struct dsa_port *dp)
if (other_dp->slave->flags & IFF_ALLMULTI)
flags.val |= BR_MCAST_FLOOD;
if (other_dp->slave->flags & IFF_PROMISC)
- flags.val |= BR_FLOOD;
+ flags.val |= BR_FLOOD | BR_MCAST_FLOOD;
}
err = dsa_port_pre_bridge_flags(dp, flags, NULL);
diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c
index f64b805303cd..eb204ad36eee 100644
--- a/net/dsa/tag_hellcreek.c
+++ b/net/dsa/tag_hellcreek.c
@@ -21,6 +21,14 @@ static struct sk_buff *hellcreek_xmit(struct sk_buff *skb,
struct dsa_port *dp = dsa_slave_to_port(dev);
u8 *tag;
+ /* Calculate checksums (if required) before adding the trailer tag to
+ * avoid including it in calculations. That would lead to wrong
+ * checksums after the switch strips the tag.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_help(skb))
+ return NULL;
+
/* Tag encoding */
tag = skb_put(skb, HELLCREEK_TAG_LEN);
*tag = BIT(dp->index);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 70e6c87fbe3d..d747166bb291 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -446,7 +446,6 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
struct page *page;
struct sk_buff *trailer;
int tailen = esp->tailen;
- unsigned int allocsz;
/* this is non-NULL only with TCP/UDP Encapsulation */
if (x->encap) {
@@ -456,8 +455,8 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
return err;
}
- allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
- if (allocsz > ESP_SKB_FRAG_MAXSIZE)
+ if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
+ ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
goto cow;
if (!skb_cloned(skb)) {
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 99db2e41ed10..aacee9dd771b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -459,14 +459,12 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
__be16 proto)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
-
- if (tunnel->parms.o_flags & TUNNEL_SEQ)
- tunnel->o_seqno++;
+ __be16 flags = tunnel->parms.o_flags;
/* Push GRE header. */
gre_build_header(skb, tunnel->tun_hlen,
- tunnel->parms.o_flags, proto, tunnel->parms.o_key,
- htonl(tunnel->o_seqno));
+ flags, proto, tunnel->parms.o_key,
+ (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
@@ -504,7 +502,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
gre_build_header(skb, tunnel_hlen, flags, proto,
tunnel_id_to_key32(tun_info->key.tun_id),
- (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
+ (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);
ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
@@ -581,7 +579,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
}
gre_build_header(skb, 8, TUNNEL_SEQ,
- proto, 0, htonl(tunnel->o_seqno++));
+ proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));
ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);
@@ -605,8 +603,8 @@ static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
key = &info->key;
ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
tunnel_id_to_key32(key->tun_id),
- key->tos & ~INET_ECN_MASK, 0, skb->mark,
- skb_get_hash(skb));
+ key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
+ skb->mark, skb_get_hash(skb));
rt = ip_route_output_key(dev_net(dev), &fl4);
if (IS_ERR(rt))
return PTR_ERR(rt);
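All three GRE transmit paths above move from a plain o_seqno++ to atomic_fetch_inc(), because concurrent transmitters can hit the same tunnel and the old post-increment was a data race. atomic_fetch_inc() returns the pre-increment value, preserving the wire-visible numbering. A minimal sketch, assuming kernel context; demo_next_seqno() is a hypothetical helper:

    /* Returns the next TX sequence number in network byte order, or 0
     * when sequence numbers are disabled for this tunnel.
     */
    static __be32 demo_next_seqno(atomic_t *seqno, bool seq_enabled)
    {
            return seq_enabled ? htonl(atomic_fetch_inc(seqno)) : 0;
    }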
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 5a473319d3a5..94017a8c3994 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -294,8 +294,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
iph->saddr, tunnel->parms.o_key,
- RT_TOS(iph->tos), tunnel->parms.link,
- tunnel->fwmark, 0);
+ RT_TOS(iph->tos), dev_net(dev),
+ tunnel->parms.link, tunnel->fwmark, 0);
rt = ip_route_output_key(tunnel->net, &fl4);
if (!IS_ERR(rt)) {
@@ -570,7 +570,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
- 0, skb->mark, skb_get_hash(skb));
+ dev_net(dev), 0, skb->mark, skb_get_hash(skb));
if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
goto tx_error;
@@ -726,7 +726,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
- tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
+ tunnel->parms.o_key, RT_TOS(tos),
+ dev_net(dev), tunnel->parms.link,
tunnel->fwmark, skb_get_hash(skb));
if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/net/ipv4/netfilter/nf_flow_table_ipv4.c
+++ /dev/null
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 2cb3b852d148..f33c31dd7366 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -281,6 +281,7 @@ bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt,
EXPORT_SYMBOL(cookie_ecn_ok);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
+ const struct tcp_request_sock_ops *af_ops,
struct sock *sk,
struct sk_buff *skb)
{
@@ -297,6 +298,10 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
return NULL;
treq = tcp_rsk(req);
+
+ /* treq->af_specific might be used to perform TCP_MD5 lookup */
+ treq->af_specific = af_ops;
+
treq->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
#if IS_ENABLED(CONFIG_MPTCP)
treq->is_mptcp = sk_is_mptcp(sk);
@@ -364,7 +369,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
goto out;
ret = NULL;
- req = cookie_tcp_reqsk_alloc(&tcp_request_sock_ops, sk, skb);
+ req = cookie_tcp_reqsk_alloc(&tcp_request_sock_ops,
+ &tcp_request_sock_ipv4_ops, sk, skb);
if (!req)
goto out;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2088f93fa37b..60f99e9fb6d1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3867,7 +3867,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_process_tlp_ack(sk, ack, flag);
if (tcp_ack_is_dubious(sk, flag)) {
- if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) {
+ if (!(flag & (FLAG_SND_UNA_ADVANCED |
+ FLAG_NOT_DUP | FLAG_DSACKING_ACK))) {
num_dupack = 1;
/* Consider if pure acks were aggregated in tcp_add_backlog() */
if (!(flag & FLAG_DATA))
@@ -5454,7 +5455,17 @@ static void tcp_new_space(struct sock *sk)
INDIRECT_CALL_1(sk->sk_write_space, sk_stream_write_space, sk);
}
-static void tcp_check_space(struct sock *sk)
+/* Caller made space either from:
+ * 1) Freeing skbs in rtx queues (after tp->snd_una has advanced)
+ * 2) Sent skbs from output queue (and thus advancing tp->snd_nxt)
+ *
+ * We might be able to generate EPOLLOUT to the application if:
+ * 1) Space consumed in output/rtx queues is below sk->sk_sndbuf/2
+ * 2) notsent amount (tp->write_seq - tp->snd_nxt) became
+ * small enough that tcp_stream_memory_free() decides it
+ * is time to generate EPOLLOUT.
+ */
+void tcp_check_space(struct sock *sk)
{
/* pairs with tcp_poll() */
smp_mb();
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6366df7aaf2a..6854bb1fb32b 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -531,7 +531,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
newtp->md5sig_info = NULL; /*XXX*/
- if (newtp->af_specific->md5_lookup(sk, newsk))
+ if (treq->af_specific->req_md5_lookup(sk, req_to_sk(req)))
newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9ede847f4199..1ca2f28c9981 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -82,6 +82,7 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
tcp_skb_pcount(skb));
+ tcp_check_space(sk);
}
/* SND.NXT, if window was not shrunk or the amount of shrunk was less than one
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index fbab921670cc..9a8e014d9b5b 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -74,27 +74,32 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
*
* If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
* called multiple times. We favor the information from the most recently
- * sent skb, i.e., the skb with the highest prior_delivered count.
+ * sent skb, i.e., the skb with the most recently sent time and the highest
+ * sequence.
*/
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
struct rate_sample *rs)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
+ u64 tx_tstamp;
if (!scb->tx.delivered_mstamp)
return;
+ tx_tstamp = tcp_skb_timestamp_us(skb);
if (!rs->prior_delivered ||
- after(scb->tx.delivered, rs->prior_delivered)) {
+ tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp,
+ scb->end_seq, rs->last_end_seq)) {
rs->prior_delivered_ce = scb->tx.delivered_ce;
rs->prior_delivered = scb->tx.delivered;
rs->prior_mstamp = scb->tx.delivered_mstamp;
rs->is_app_limited = scb->tx.is_app_limited;
rs->is_retrans = scb->sacked & TCPCB_RETRANS;
+ rs->last_end_seq = scb->end_seq;
/* Record send time of most recently ACKed packet: */
- tp->first_tx_mstamp = tcp_skb_timestamp_us(skb);
+ tp->first_tx_mstamp = tx_tstamp;
/* Find the duration of the "send phase" of this window: */
rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
scb->tx.first_tx_mstamp);
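tcp_rate_skb_delivered() now keys the rate sample on the most recently sent skb rather than on the highest prior_delivered count, which can tie under stretched ACKs. The comparison it needs is lexicographic: the later transmit timestamp wins, and equal timestamps fall back to the higher end sequence. A sketch of a helper consistent with the call above (the series defines the real tcp_skb_sent_after() in a header not shown in this diff):

    static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
    {
            /* Later send time wins; break timestamp ties by sequence. */
            return t1 > t2 || (t1 == t2 && after(seq1, seq2));
    }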
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 55d604c9b3b3..f2120e92caf1 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -482,7 +482,6 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
struct page *page;
struct sk_buff *trailer;
int tailen = esp->tailen;
- unsigned int allocsz;
if (x->encap) {
int err = esp6_output_encap(x, skb, esp);
@@ -491,8 +490,8 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
return err;
}
- allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
- if (allocsz > ESP_SKB_FRAG_MAXSIZE)
+ if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
+ ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
goto cow;
if (!skb_cloned(skb)) {
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 8753e9cec326..5136959b3dc5 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -724,6 +724,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
{
struct ip6_tnl *tunnel = netdev_priv(dev);
__be16 protocol;
+ __be16 flags;
if (dev->type == ARPHRD_ETHER)
IPCB(skb)->flags = 0;
@@ -733,16 +734,13 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
else
fl6->daddr = tunnel->parms.raddr;
- if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
- return -ENOMEM;
-
/* Push GRE header. */
protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
if (tunnel->parms.collect_md) {
struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
- __be16 flags;
+ int tun_hlen;
tun_info = skb_tunnel_info_txcheck(skb);
if (IS_ERR(tun_info) ||
@@ -760,21 +758,27 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
dsfield = key->tos;
flags = key->tun_flags &
(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
- tunnel->tun_hlen = gre_calc_hlen(flags);
+ tun_hlen = gre_calc_hlen(flags);
- gre_build_header(skb, tunnel->tun_hlen,
+ if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen))
+ return -ENOMEM;
+
+ gre_build_header(skb, tun_hlen,
flags, protocol,
tunnel_id_to_key32(tun_info->key.tun_id),
- (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
+ (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
: 0);
} else {
- if (tunnel->parms.o_flags & TUNNEL_SEQ)
- tunnel->o_seqno++;
+ if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
+ return -ENOMEM;
+
+ flags = tunnel->parms.o_flags;
- gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+ gre_build_header(skb, tunnel->tun_hlen, flags,
protocol, tunnel->parms.o_key,
- htonl(tunnel->o_seqno));
+ (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno))
+ : 0);
}
return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
@@ -1052,7 +1056,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
/* Push GRE header. */
proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
: htons(ETH_P_ERSPAN2);
- gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
+ gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno)));
/* TooBig packet may have updated dst->dev's mtu */
if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index e23f058166af..fa63ef2bd99c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -485,7 +485,7 @@ int ip6_forward(struct sk_buff *skb)
goto drop;
if (!net->ipv6.devconf_all->disable_policy &&
- !idev->cnf.disable_policy &&
+ (!idev || !idev->cnf.disable_policy) &&
!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
goto drop;
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 1da332450d98..8ce60ab89015 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -24,14 +24,13 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct sock *sk = sk_to_full_sk(sk_partial);
+ struct net_device *dev = skb_dst(skb)->dev;
struct flow_keys flkeys;
unsigned int hh_len;
struct dst_entry *dst;
int strict = (ipv6_addr_type(&iph->daddr) &
(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
struct flowi6 fl6 = {
- .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
- strict ? skb_dst(skb)->dev->ifindex : 0,
.flowi6_mark = skb->mark,
.flowi6_uid = sock_net_uid(net, sk),
.daddr = iph->daddr,
@@ -39,6 +38,13 @@ int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff
};
int err;
+ if (sk && sk->sk_bound_dev_if)
+ fl6.flowi6_oif = sk->sk_bound_dev_if;
+ else if (strict)
+ fl6.flowi6_oif = dev->ifindex;
+ else
+ fl6.flowi6_oif = l3mdev_master_ifindex(dev);
+
fib6_rules_early_flow_dissect(net, skb, &fl6, &flkeys);
dst = ip6_route_output(net, sk, &fl6);
err = dst->error;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 169e9df6d172..c4b6ce017d5e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3292,6 +3292,7 @@ static int ip6_dst_gc(struct dst_ops *ops)
int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
+ unsigned int val;
int entries;
entries = dst_entries_get_fast(ops);
@@ -3302,13 +3303,13 @@ static int ip6_dst_gc(struct dst_ops *ops)
entries <= rt_max_size)
goto out;
- net->ipv6.ip6_rt_gc_expire++;
- fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
+ fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
entries = dst_entries_get_slow(ops);
if (entries < ops->gc_thresh)
- net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
+ atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1);
out:
- net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
+ val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
+ atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
return entries > rt_max_size;
}
@@ -6509,7 +6510,7 @@ static int __net_init ip6_route_net_init(struct net *net)
net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
net->ipv6.sysctl.skip_notify_on_dev_down = 0;
- net->ipv6.ip6_rt_gc_expire = 30*HZ;
+ atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*HZ);
ret = 0;
out:
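ip6_dst_gc() turns ip6_rt_gc_expire into an atomic_t so concurrent GC invocations no longer race on the counter. Note the decay step is still a read-then-set pair, not a single atomic RMW; a lost update only perturbs a heuristic, which is the trade the patch accepts. A minimal sketch, assuming kernel context; demo_decay() is hypothetical:

    static void demo_decay(atomic_t *expire, int elasticity)
    {
            unsigned int val = atomic_read(expire);

            /* Racy as a whole, by design: a lost decay just means the
             * heuristic stays high a little longer.
             */
            atomic_set(expire, val - (val >> elasticity));
    }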
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index d1b61d00368e..9cc123f000fb 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -170,7 +170,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
goto out;
ret = NULL;
- req = cookie_tcp_reqsk_alloc(&tcp6_request_sock_ops, sk, skb);
+ req = cookie_tcp_reqsk_alloc(&tcp6_request_sock_ops,
+ &tcp_request_sock_ipv6_ops, sk, skb);
if (!req)
goto out;
diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
index 4eb8892fb2ff..ca10916340b0 100644
--- a/net/l3mdev/l3mdev.c
+++ b/net/l3mdev/l3mdev.c
@@ -147,7 +147,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
dev = dev_get_by_index_rcu(net, ifindex);
while (dev && !netif_is_l3_master(dev))
- dev = netdev_master_upper_dev_get(dev);
+ dev = netdev_master_upper_dev_get_rcu(dev);
return dev ? dev->ifindex : 0;
}
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 9479f2787ea7..88d9cc945a21 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -441,7 +441,7 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
#define PRINT_HT_CAP(_cond, _str) \
do { \
if (_cond) \
- p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \
+ p += scnprintf(p, bufsz + buf - p, "\t" _str "\n"); \
} while (0)
char *buf, *p;
int i;
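The debugfs fix above is a classic sizeof-on-a-pointer bug: buf is a heap pointer, so sizeof(buf) yields the pointer width (8 on 64-bit), not the allocation size, and the remaining space must be computed from the tracked bufsz instead. A minimal sketch of the correct append pattern, assuming kernel context; demo_fill() is hypothetical:

    static size_t demo_fill(char *buf, size_t bufsz)
    {
            char *p = buf;

            /* Remaining space = total size minus bytes already written;
             * scnprintf() returns what was actually stored, so p never
             * walks past buf + bufsz.
             */
            p += scnprintf(p, bufsz + buf - p, "first\n");
            p += scnprintf(p, bufsz + buf - p, "second\n");
            return p - buf;
    }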
diff --git a/net/mctp/device.c b/net/mctp/device.c
index f49be882e98e..99a3bda8852f 100644
--- a/net/mctp/device.c
+++ b/net/mctp/device.c
@@ -313,6 +313,7 @@ void mctp_dev_hold(struct mctp_dev *mdev)
void mctp_dev_put(struct mctp_dev *mdev)
{
if (mdev && refcount_dec_and_test(&mdev->refs)) {
+ kfree(mdev->addrs);
dev_put(mdev->dev);
kfree_rcu(mdev, rcu);
}
@@ -441,7 +442,6 @@ static void mctp_unregister(struct net_device *dev)
mctp_route_remove_dev(mdev);
mctp_neigh_remove_dev(mdev);
- kfree(mdev->addrs);
mctp_dev_put(mdev);
}
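The mctp change moves kfree(mdev->addrs) from unregister time into the final mctp_dev_put(), because readers can legitimately hold a device reference across unregister and must not see addrs freed under them. The general shape, as a minimal sketch assuming kernel context (demo_dev and demo_dev_put() are hypothetical, and the real code defers the struct itself with kfree_rcu()):

    struct demo_dev {
            refcount_t refs;
            void *addrs;
    };

    static void demo_dev_put(struct demo_dev *d)
    {
            if (d && refcount_dec_and_test(&d->refs)) {
                    kfree(d->addrs);        /* safe: no references remain */
                    kfree(d);               /* real code defers via kfree_rcu() */
            }
    }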
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 2c467c422dc6..fb67f1ca2495 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1495,7 +1495,7 @@ int __init ip_vs_conn_init(void)
pr_info("Connection hash table configured "
"(size=%d, memory=%ldKbytes)\n",
ip_vs_conn_tab_size,
- (long)(ip_vs_conn_tab_size*sizeof(struct list_head))/1024);
+ (long)(ip_vs_conn_tab_size*sizeof(*ip_vs_conn_tab))/1024);
IP_VS_DBG(0, "Each connection entry needs %zd bytes at least\n",
sizeof(struct ip_vs_conn));
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 8ec55cd72572..204a5cdff5b1 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -556,24 +556,14 @@ static bool tcp_in_window(struct nf_conn *ct,
}
}
- } else if (((state->state == TCP_CONNTRACK_SYN_SENT
- && dir == IP_CT_DIR_ORIGINAL)
- || (state->state == TCP_CONNTRACK_SYN_RECV
- && dir == IP_CT_DIR_REPLY))
- && after(end, sender->td_end)) {
+ } else if (tcph->syn &&
+ after(end, sender->td_end) &&
+ (state->state == TCP_CONNTRACK_SYN_SENT ||
+ state->state == TCP_CONNTRACK_SYN_RECV)) {
/*
* RFC 793: "if a TCP is reinitialized ... then it need
* not wait at all; it must only be sure to use sequence
* numbers larger than those recently used."
- */
- sender->td_end =
- sender->td_maxend = end;
- sender->td_maxwin = (win == 0 ? 1 : win);
-
- tcp_options(skb, dataoff, tcph, sender);
- } else if (tcph->syn && dir == IP_CT_DIR_REPLY &&
- state->state == TCP_CONNTRACK_SYN_SENT) {
- /* Retransmitted syn-ack, or syn (simultaneous open).
*
* Re-init state for this direction, just like for the first
* syn(-ack) reply, it might differ in seq, ack or tcp options.
@@ -581,7 +571,8 @@ static bool tcp_in_window(struct nf_conn *ct,
tcp_init_sender(sender, receiver,
skb, dataoff, tcph,
end, win);
- if (!tcph->ack)
+
+ if (dir == IP_CT_DIR_REPLY && !tcph->ack)
return true;
}
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 3e1afd10a9b6..55aa55b252b2 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -823,7 +823,7 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
-#if IS_ENABLED(CONFIG_NFT_FLOW_OFFLOAD)
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD] = {
.procname = "nf_flowtable_udp_timeout",
.maxlen = sizeof(unsigned int),
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 128ee3b300d6..16c3a39689f4 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -9363,7 +9363,7 @@ int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
}
EXPORT_SYMBOL_GPL(nft_parse_u32_check);
-static unsigned int nft_parse_register(const struct nlattr *attr, u32 *preg)
+static int nft_parse_register(const struct nlattr *attr, u32 *preg)
{
unsigned int reg;
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index d600a566da32..7325bee7d144 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -349,7 +349,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
*ext = &rbe->ext;
return -EEXIST;
} else {
- p = &parent->rb_left;
+ overlap = false;
+ if (nft_rbtree_interval_end(rbe))
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
}
}
diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
index bd3792f080ed..05ae5a338b6f 100644
--- a/net/netfilter/nft_socket.c
+++ b/net/netfilter/nft_socket.c
@@ -37,12 +37,11 @@ static void nft_socket_wildcard(const struct nft_pktinfo *pkt,
#ifdef CONFIG_SOCK_CGROUP_DATA
static noinline bool
-nft_sock_get_eval_cgroupv2(u32 *dest, const struct nft_pktinfo *pkt, u32 level)
+nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo *pkt, u32 level)
{
- struct sock *sk = skb_to_full_sk(pkt->skb);
struct cgroup *cgrp;
- if (!sk || !sk_fullsock(sk) || !net_eq(nft_net(pkt), sock_net(sk)))
+ if (!sk_fullsock(sk))
return false;
cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
@@ -55,6 +54,32 @@ nft_sock_get_eval_cgroupv2(u32 *dest, const struct nft_pktinfo *pkt, u32 level)
}
#endif
+static struct sock *nft_socket_do_lookup(const struct nft_pktinfo *pkt)
+{
+ const struct net_device *indev = nft_in(pkt);
+ const struct sk_buff *skb = pkt->skb;
+ struct sock *sk = NULL;
+
+ if (!indev)
+ return NULL;
+
+ switch (nft_pf(pkt)) {
+ case NFPROTO_IPV4:
+ sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, indev);
+ break;
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
+ case NFPROTO_IPV6:
+ sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, indev);
+ break;
+#endif
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return sk;
+}
+
static void nft_socket_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
@@ -68,20 +93,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
sk = NULL;
if (!sk)
- switch(nft_pf(pkt)) {
- case NFPROTO_IPV4:
- sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, nft_in(pkt));
- break;
-#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
- case NFPROTO_IPV6:
- sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, nft_in(pkt));
- break;
-#endif
- default:
- WARN_ON_ONCE(1);
- regs->verdict.code = NFT_BREAK;
- return;
- }
+ sk = nft_socket_do_lookup(pkt);
if (!sk) {
regs->verdict.code = NFT_BREAK;
@@ -109,7 +121,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
break;
#ifdef CONFIG_SOCK_CGROUP_DATA
case NFT_SOCKET_CGROUPV2:
- if (!nft_sock_get_eval_cgroupv2(dest, pkt, priv->level)) {
+ if (!nft_sock_get_eval_cgroupv2(dest, sk, pkt, priv->level)) {
regs->verdict.code = NFT_BREAK;
return;
}
@@ -225,6 +237,16 @@ static bool nft_socket_reduce(struct nft_regs_track *track,
return nft_expr_reduce_bitwise(track, expr);
}
+static int nft_socket_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ return nft_chain_validate_hooks(ctx->chain,
+ (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_LOCAL_OUT));
+}
+
static struct nft_expr_type nft_socket_type;
static const struct nft_expr_ops nft_socket_ops = {
.type = &nft_socket_type,
@@ -232,6 +254,7 @@ static const struct nft_expr_ops nft_socket_ops = {
.eval = nft_socket_eval,
.init = nft_socket_init,
.dump = nft_socket_dump,
+ .validate = nft_socket_validate,
.reduce = nft_socket_reduce,
};
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 47a876ccd288..05a3795eac8e 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2263,6 +2263,13 @@ static int netlink_dump(struct sock *sk)
* single netdev. The outcome is MSG_TRUNC error.
*/
skb_reserve(skb, skb_tailroom(skb) - alloc_size);
+
+ /* Make sure malicious BPF programs cannot read uninitialized memory
+ * from skb->head -> skb->data
+ */
+ skb_reset_network_header(skb);
+ skb_reset_mac_header(skb);
+
netlink_skb_set_owner_r(skb, sk);
if (nlk->dump_done_errno > 0) {
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index d2537383a3e8..6a193cce2a75 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -560,6 +560,10 @@ static int nci_close_device(struct nci_dev *ndev)
mutex_lock(&ndev->req_lock);
if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
+ /* Need to flush the cmd wq in case
+ * there is a queued/running cmd_work
+ */
+ flush_workqueue(ndev->cmd_wq);
del_timer_sync(&ndev->cmd_timer);
del_timer_sync(&ndev->data_timer);
mutex_unlock(&ndev->req_lock);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 7176156d3844..4c09cf8a0ab2 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2465,7 +2465,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
- if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
+ if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) {
OVS_NLERR(log, "Flow action size exceeds max %u",
MAX_ACTIONS_BUFSIZE);
return ERR_PTR(-EMSGSIZE);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c39c09899fd0..002d2b9c69dd 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2858,8 +2858,9 @@ tpacket_error:
status = TP_STATUS_SEND_REQUEST;
err = po->xmit(skb);
- if (unlikely(err > 0)) {
- err = net_xmit_errno(err);
+ if (unlikely(err != 0)) {
+ if (err > 0)
+ err = net_xmit_errno(err);
if (err && __packet_get_status(po, ph) ==
TP_STATUS_AVAILABLE) {
/* skb was destructed already */
@@ -3060,8 +3061,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
skb->no_fcs = 1;
err = po->xmit(skb);
- if (err > 0 && (err = net_xmit_errno(err)) != 0)
- goto out_unlock;
+ if (unlikely(err != 0)) {
+ if (err > 0)
+ err = net_xmit_errno(err);
+ if (err)
+ goto out_unlock;
+ }
dev_put(dev);
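Both af_packet transmit paths now treat any nonzero return from po->xmit() as an error: positive values are qdisc congestion codes that net_xmit_errno() maps to 0 or -errno, while negative values are already -errno and must pass through untouched (the old code silently ignored them). A minimal sketch of the pattern, assuming kernel context; demo_xmit() is hypothetical:

    static int demo_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *))
    {
            int err = xmit(skb);

            if (unlikely(err != 0)) {
                    if (err > 0)                    /* NET_XMIT_* code */
                            err = net_xmit_errno(err);
                    /* err < 0: already -errno, propagate as-is */
            }
            return err;
    }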
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index f15d6942da45..cc7e30733feb 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -113,7 +113,9 @@ static __net_exit void rxrpc_exit_net(struct net *net)
struct rxrpc_net *rxnet = rxrpc_net(net);
rxnet->live = false;
+ del_timer_sync(&rxnet->peer_keepalive_timer);
cancel_work_sync(&rxnet->peer_keepalive_work);
+ /* Remove the timer again as the worker may have restarted it. */
del_timer_sync(&rxnet->peer_keepalive_timer);
rxrpc_destroy_all_calls(rxnet);
rxrpc_destroy_all_connections(rxnet);
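The rxrpc fix handles a timer and a worker that can re-arm each other: killing either one alone leaves a window where the survivor restarts its peer. The reliable order is timer, worker, then timer again. A minimal sketch, assuming kernel context; demo_shutdown() is hypothetical:

    static void demo_shutdown(struct timer_list *t, struct work_struct *w)
    {
            del_timer_sync(t);      /* stop timer -> no new work queued */
            cancel_work_sync(w);    /* flush worker; it may re-arm the timer */
            del_timer_sync(t);      /* kill any timer the worker restarted */
    }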
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 2957f8f5cea7..f0699f39afdb 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1672,10 +1672,10 @@ static int tcf_chain_tp_insert(struct tcf_chain *chain,
if (chain->flushing)
return -EAGAIN;
+ RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
if (*chain_info->pprev == chain->filter_chain)
tcf_chain0_head_change(chain, tp);
tcf_proto_get(tp);
- RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
rcu_assign_pointer(*chain_info->pprev, tp);
return 0;
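tcf_chain_tp_insert() now initializes tp->next before the element becomes reachable, the standard RCU publication order: a reader that sees the new pointer must see a fully built node, and rcu_assign_pointer() orders only the stores that precede it. A minimal sketch, assuming kernel context; demo_node and demo_publish() are hypothetical:

    struct demo_node {
            struct demo_node __rcu *next;
    };

    static void demo_publish(struct demo_node __rcu **head,
                             struct demo_node *n, struct demo_node *next)
    {
            RCU_INIT_POINTER(n->next, next);   /* init while still private */
            rcu_assign_pointer(*head, n);      /* then make it visible */
    }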
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index c80fc49c0da1..ed5e6f08e74a 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1013,6 +1013,7 @@ static int fl_set_key_mpls(struct nlattr **tb,
static void fl_set_key_vlan(struct nlattr **tb,
__be16 ethertype,
int vlan_id_key, int vlan_prio_key,
+ int vlan_next_eth_type_key,
struct flow_dissector_key_vlan *key_val,
struct flow_dissector_key_vlan *key_mask)
{
@@ -1031,6 +1032,11 @@ static void fl_set_key_vlan(struct nlattr **tb,
}
key_val->vlan_tpid = ethertype;
key_mask->vlan_tpid = cpu_to_be16(~0);
+ if (tb[vlan_next_eth_type_key]) {
+ key_val->vlan_eth_type =
+ nla_get_be16(tb[vlan_next_eth_type_key]);
+ key_mask->vlan_eth_type = cpu_to_be16(~0);
+ }
}
static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
@@ -1602,8 +1608,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
if (eth_type_vlan(ethertype)) {
fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
- TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
- &mask->vlan);
+ TCA_FLOWER_KEY_VLAN_PRIO,
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+ &key->vlan, &mask->vlan);
if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
@@ -1611,6 +1618,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
fl_set_key_vlan(tb, ethertype,
TCA_FLOWER_KEY_CVLAN_ID,
TCA_FLOWER_KEY_CVLAN_PRIO,
+ TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
&key->cvlan, &mask->cvlan);
fl_set_key_val(tb, &key->basic.n_proto,
TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
@@ -3002,13 +3010,13 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
goto nla_put_failure;
if (mask->basic.n_proto) {
- if (mask->cvlan.vlan_tpid) {
+ if (mask->cvlan.vlan_eth_type) {
if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
key->basic.n_proto))
goto nla_put_failure;
- } else if (mask->vlan.vlan_tpid) {
+ } else if (mask->vlan.vlan_eth_type) {
if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
- key->basic.n_proto))
+ key->vlan.vlan_eth_type))
goto nla_put_failure;
}
}
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index cf5649292ee0..4d27300c287c 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -386,14 +386,19 @@ static int u32_init(struct tcf_proto *tp)
return 0;
}
-static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
+static void __u32_destroy_key(struct tc_u_knode *n)
{
struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
tcf_exts_destroy(&n->exts);
- tcf_exts_put_net(&n->exts);
if (ht && --ht->refcnt == 0)
kfree(ht);
+ kfree(n);
+}
+
+static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
+{
+ tcf_exts_put_net(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
if (free_pf)
free_percpu(n->pf);
@@ -402,8 +407,7 @@ static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
if (free_pf)
free_percpu(n->pcpu_success);
#endif
- kfree(n);
- return 0;
+ __u32_destroy_key(n);
}
/* u32_delete_key_rcu should be called when free'ing a copied
@@ -811,10 +815,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
new->flags = n->flags;
RCU_INIT_POINTER(new->ht_down, ht);
- /* bump reference count as long as we hold pointer to structure */
- if (ht)
- ht->refcnt++;
-
#ifdef CONFIG_CLS_U32_PERF
/* Statistics may be incremented by readers during update
* so we must keep them intact. When the node is later destroyed
@@ -836,6 +836,10 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
return NULL;
}
+ /* bump reference count as long as we hold pointer to structure */
+ if (ht)
+ ht->refcnt++;
+
return new;
}
@@ -900,13 +904,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
extack);
if (err) {
- u32_destroy_key(new, false);
+ __u32_destroy_key(new);
return err;
}
err = u32_replace_hw_knode(tp, new, flags, extack);
if (err) {
- u32_destroy_key(new, false);
+ __u32_destroy_key(new);
return err;
}
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 377f896bdedc..b9c71a304d39 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -417,7 +417,8 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
{
struct taprio_sched *q = qdisc_priv(sch);
- if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
+ /* sk_flags are only safe to use on full sockets. */
+ if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
if (!is_valid_interval(skb, sch))
return qdisc_drop(skb, sch, to_free);
} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index b3815b568e8e..463c4a58d2c3 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -458,6 +458,10 @@ void sctp_generate_reconf_event(struct timer_list *t)
goto out_unlock;
}
+ /* This happens when the response arrives after the timer is triggered. */
+ if (!asoc->strreset_chunk)
+ goto out_unlock;
+
error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
asoc->state, asoc->ep, asoc,
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 7f342bc12735..52edee1322fc 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -781,7 +781,7 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
}
}
- if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
+ if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
sctp_association_free(new_asoc);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
@@ -932,7 +932,7 @@ enum sctp_disposition sctp_sf_do_5_1E_ca(struct net *net,
/* Set peer label for connection. */
if (security_sctp_assoc_established((struct sctp_association *)asoc,
- chunk->skb))
+ chunk->head_skb ?: chunk->skb))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Verify that the chunk length for the COOKIE-ACK is OK.
@@ -2262,7 +2262,7 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
}
/* Update socket peer label if first association. */
- if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
+ if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
sctp_association_free(new_asoc);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 3e1a9600be5e..7b0427658056 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5636,7 +5636,7 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
* Set the daddr and initialize id to something more random and also
* copy over any ip options.
*/
- sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
+ sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sock->sk);
sp->pf->copy_ip_options(sk, sock->sk);
/* Populate the fields of the newsk from the oldsk and migrate the
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index f0d118e9f155..fce16b9d6e1a 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -121,6 +121,7 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
bool *own_req)
{
struct smc_sock *smc;
+ struct sock *child;
smc = smc_clcsock_user_data(sk);
@@ -134,8 +135,17 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
}
/* passthrough to original syn recv sock fct */
- return smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
- own_req);
+ child = smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
+ own_req);
+ /* child must not inherit smc or its ops */
+ if (child) {
+ rcu_assign_sk_user_data(child, NULL);
+
+ /* v4-mapped sockets don't inherit parent ops. Don't restore. */
+ if (inet_csk(child)->icsk_af_ops == inet_csk(sk)->icsk_af_ops)
+ inet_csk(child)->icsk_af_ops = smc->ori_af_ops;
+ }
+ return child;
drop:
dst_release(dst);
@@ -233,11 +243,27 @@ struct proto smc_proto6 = {
};
EXPORT_SYMBOL_GPL(smc_proto6);
+static void smc_fback_restore_callbacks(struct smc_sock *smc)
+{
+ struct sock *clcsk = smc->clcsock->sk;
+
+ write_lock_bh(&clcsk->sk_callback_lock);
+ clcsk->sk_user_data = NULL;
+
+ smc_clcsock_restore_cb(&clcsk->sk_state_change, &smc->clcsk_state_change);
+ smc_clcsock_restore_cb(&clcsk->sk_data_ready, &smc->clcsk_data_ready);
+ smc_clcsock_restore_cb(&clcsk->sk_write_space, &smc->clcsk_write_space);
+ smc_clcsock_restore_cb(&clcsk->sk_error_report, &smc->clcsk_error_report);
+
+ write_unlock_bh(&clcsk->sk_callback_lock);
+}
+
static void smc_restore_fallback_changes(struct smc_sock *smc)
{
if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
smc->clcsock->file->private_data = smc->sk.sk_socket;
smc->clcsock->file = NULL;
+ smc_fback_restore_callbacks(smc);
}
}
@@ -363,6 +389,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
sk->sk_prot->hash(sk);
sk_refcnt_debug_inc(sk);
mutex_init(&smc->clcsock_release_lock);
+ smc_init_saved_callbacks(smc);
return sk;
}
@@ -734,47 +761,73 @@ out:
static void smc_fback_state_change(struct sock *clcsk)
{
- struct smc_sock *smc =
- smc_clcsock_user_data(clcsk);
+ struct smc_sock *smc;
- if (!smc)
- return;
- smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_state_change);
+ read_lock_bh(&clcsk->sk_callback_lock);
+ smc = smc_clcsock_user_data(clcsk);
+ if (smc)
+ smc_fback_forward_wakeup(smc, clcsk,
+ smc->clcsk_state_change);
+ read_unlock_bh(&clcsk->sk_callback_lock);
}
static void smc_fback_data_ready(struct sock *clcsk)
{
- struct smc_sock *smc =
- smc_clcsock_user_data(clcsk);
+ struct smc_sock *smc;
- if (!smc)
- return;
- smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_data_ready);
+ read_lock_bh(&clcsk->sk_callback_lock);
+ smc = smc_clcsock_user_data(clcsk);
+ if (smc)
+ smc_fback_forward_wakeup(smc, clcsk,
+ smc->clcsk_data_ready);
+ read_unlock_bh(&clcsk->sk_callback_lock);
}
static void smc_fback_write_space(struct sock *clcsk)
{
- struct smc_sock *smc =
- smc_clcsock_user_data(clcsk);
+ struct smc_sock *smc;
- if (!smc)
- return;
- smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_write_space);
+ read_lock_bh(&clcsk->sk_callback_lock);
+ smc = smc_clcsock_user_data(clcsk);
+ if (smc)
+ smc_fback_forward_wakeup(smc, clcsk,
+ smc->clcsk_write_space);
+ read_unlock_bh(&clcsk->sk_callback_lock);
}
static void smc_fback_error_report(struct sock *clcsk)
{
- struct smc_sock *smc =
- smc_clcsock_user_data(clcsk);
+ struct smc_sock *smc;
- if (!smc)
- return;
- smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_error_report);
+ read_lock_bh(&clcsk->sk_callback_lock);
+ smc = smc_clcsock_user_data(clcsk);
+ if (smc)
+ smc_fback_forward_wakeup(smc, clcsk,
+ smc->clcsk_error_report);
+ read_unlock_bh(&clcsk->sk_callback_lock);
+}
+
+static void smc_fback_replace_callbacks(struct smc_sock *smc)
+{
+ struct sock *clcsk = smc->clcsock->sk;
+
+ write_lock_bh(&clcsk->sk_callback_lock);
+ clcsk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
+
+ smc_clcsock_replace_cb(&clcsk->sk_state_change, smc_fback_state_change,
+ &smc->clcsk_state_change);
+ smc_clcsock_replace_cb(&clcsk->sk_data_ready, smc_fback_data_ready,
+ &smc->clcsk_data_ready);
+ smc_clcsock_replace_cb(&clcsk->sk_write_space, smc_fback_write_space,
+ &smc->clcsk_write_space);
+ smc_clcsock_replace_cb(&clcsk->sk_error_report, smc_fback_error_report,
+ &smc->clcsk_error_report);
+
+ write_unlock_bh(&clcsk->sk_callback_lock);
}
static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
{
- struct sock *clcsk;
int rc = 0;
mutex_lock(&smc->clcsock_release_lock);
@@ -782,10 +835,7 @@ static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
rc = -EBADF;
goto out;
}
- clcsk = smc->clcsock->sk;
- if (smc->use_fallback)
- goto out;
smc->use_fallback = true;
smc->fallback_rsn = reason_code;
smc_stat_fallback(smc);
@@ -800,18 +850,7 @@ static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
* in smc sk->sk_wq and they should be woken up
* as clcsock's wait queue is woken up.
*/
- smc->clcsk_state_change = clcsk->sk_state_change;
- smc->clcsk_data_ready = clcsk->sk_data_ready;
- smc->clcsk_write_space = clcsk->sk_write_space;
- smc->clcsk_error_report = clcsk->sk_error_report;
-
- clcsk->sk_state_change = smc_fback_state_change;
- clcsk->sk_data_ready = smc_fback_data_ready;
- clcsk->sk_write_space = smc_fback_write_space;
- clcsk->sk_error_report = smc_fback_error_report;
-
- smc->clcsock->sk->sk_user_data =
- (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
+ smc_fback_replace_callbacks(smc);
}
out:
mutex_unlock(&smc->clcsock_release_lock);
@@ -1465,6 +1504,8 @@ static void smc_connect_work(struct work_struct *work)
smc->sk.sk_state = SMC_CLOSED;
if (rc == -EPIPE || rc == -EAGAIN)
smc->sk.sk_err = EPIPE;
+ else if (rc == -ECONNREFUSED)
+ smc->sk.sk_err = ECONNREFUSED;
else if (signal_pending(current))
smc->sk.sk_err = -sock_intr_errno(timeo);
sock_put(&smc->sk); /* passive closing */
@@ -1584,6 +1625,19 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
* function; switch it back to the original sk_data_ready function
*/
new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
+
+ /* if the new clcsock has also inherited the fallback-specific callback
+ * functions, switch them back to the original ones.
+ */
+ if (lsmc->use_fallback) {
+ if (lsmc->clcsk_state_change)
+ new_clcsock->sk->sk_state_change = lsmc->clcsk_state_change;
+ if (lsmc->clcsk_write_space)
+ new_clcsock->sk->sk_write_space = lsmc->clcsk_write_space;
+ if (lsmc->clcsk_error_report)
+ new_clcsock->sk->sk_error_report = lsmc->clcsk_error_report;
+ }
+
(*new_smc)->clcsock = new_clcsock;
out:
return rc;
@@ -2343,17 +2397,20 @@ out:
static void smc_clcsock_data_ready(struct sock *listen_clcsock)
{
- struct smc_sock *lsmc =
- smc_clcsock_user_data(listen_clcsock);
+ struct smc_sock *lsmc;
+ read_lock_bh(&listen_clcsock->sk_callback_lock);
+ lsmc = smc_clcsock_user_data(listen_clcsock);
if (!lsmc)
- return;
+ goto out;
lsmc->clcsk_data_ready(listen_clcsock);
if (lsmc->sk.sk_state == SMC_LISTEN) {
sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
if (!queue_work(smc_tcp_ls_wq, &lsmc->tcp_listen_work))
sock_put(&lsmc->sk);
}
+out:
+ read_unlock_bh(&listen_clcsock->sk_callback_lock);
}
static int smc_listen(struct socket *sock, int backlog)
@@ -2385,10 +2442,12 @@ static int smc_listen(struct socket *sock, int backlog)
/* save original sk_data_ready function and establish
* smc-specific sk_data_ready function
*/
- smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready;
- smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
+ write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
smc->clcsock->sk->sk_user_data =
(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
+ smc_clcsock_replace_cb(&smc->clcsock->sk->sk_data_ready,
+ smc_clcsock_data_ready, &smc->clcsk_data_ready);
+ write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
/* save original ops */
smc->ori_af_ops = inet_csk(smc->clcsock->sk)->icsk_af_ops;
@@ -2403,7 +2462,11 @@ static int smc_listen(struct socket *sock, int backlog)
rc = kernel_listen(smc->clcsock, backlog);
if (rc) {
- smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
+ write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
+ smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready,
+ &smc->clcsk_data_ready);
+ smc->clcsock->sk->sk_user_data = NULL;
+ write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
goto out;
}
sk->sk_max_ack_backlog = backlog;
@@ -2664,8 +2727,10 @@ static int smc_shutdown(struct socket *sock, int how)
if (smc->use_fallback) {
rc = kernel_sock_shutdown(smc->clcsock, how);
sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
- if (sk->sk_shutdown == SHUTDOWN_MASK)
+ if (sk->sk_shutdown == SHUTDOWN_MASK) {
sk->sk_state = SMC_CLOSED;
+ sock_put(sk);
+ }
goto out;
}
switch (how) {
diff --git a/net/smc/smc.h b/net/smc/smc.h
index ea0620529ebe..5ed765ea0c73 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -288,12 +288,41 @@ static inline struct smc_sock *smc_sk(const struct sock *sk)
return (struct smc_sock *)sk;
}
+static inline void smc_init_saved_callbacks(struct smc_sock *smc)
+{
+ smc->clcsk_state_change = NULL;
+ smc->clcsk_data_ready = NULL;
+ smc->clcsk_write_space = NULL;
+ smc->clcsk_error_report = NULL;
+}
+
static inline struct smc_sock *smc_clcsock_user_data(const struct sock *clcsk)
{
return (struct smc_sock *)
((uintptr_t)clcsk->sk_user_data & ~SK_USER_DATA_NOCOPY);
}
+/* save target_cb in saved_cb, and replace target_cb with new_cb */
+static inline void smc_clcsock_replace_cb(void (**target_cb)(struct sock *),
+ void (*new_cb)(struct sock *),
+ void (**saved_cb)(struct sock *))
+{
+ /* only save once */
+ if (!*saved_cb)
+ *saved_cb = *target_cb;
+ *target_cb = new_cb;
+}
+
+/* restore target_cb to saved_cb, and reset saved_cb to NULL */
+static inline void smc_clcsock_restore_cb(void (**target_cb)(struct sock *),
+ void (**saved_cb)(struct sock *))
+{
+ if (!*saved_cb)
+ return;
+ *target_cb = *saved_cb;
+ *saved_cb = NULL;
+}
+
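
Two properties of these helpers matter for the call sites above: smc_clcsock_replace_cb() saves the original only when the slot is empty, so a repeated replace cannot overwrite the saved pointer with the wrapper itself, and smc_clcsock_restore_cb() clears the slot so a later replace saves again. A short illustration:

	smc_clcsock_replace_cb(&sk->sk_data_ready, smc_fback_data_ready,
			       &smc->clcsk_data_ready);	/* saves the original */
	smc_clcsock_replace_cb(&sk->sk_data_ready, smc_fback_data_ready,
			       &smc->clcsk_data_ready);	/* save is a no-op */
	smc_clcsock_restore_cb(&sk->sk_data_ready, &smc->clcsk_data_ready);
	/* sk_data_ready is the original again; the saved slot is NULL */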
extern struct workqueue_struct *smc_hs_wq; /* wq for handshake work */
extern struct workqueue_struct *smc_close_wq; /* wq for close work */
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index ce27399b38b1..f9f3f59c79de 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -191,7 +191,8 @@ static int smc_nl_ueid_dumpinfo(struct sk_buff *skb, u32 portid, u32 seq,
flags, SMC_NETLINK_DUMP_UEID);
if (!hdr)
return -ENOMEM;
- snprintf(ueid_str, sizeof(ueid_str), "%s", ueid);
+ memcpy(ueid_str, ueid, SMC_MAX_EID_LEN);
+ ueid_str[SMC_MAX_EID_LEN] = 0;
if (nla_put_string(skb, SMC_NLA_EID_TABLE_ENTRY, ueid_str)) {
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
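
The switch from snprintf() to memcpy() matters because the EID fields are fixed-width buffers with no guaranteed terminating NUL; "%s" formatting scans the source for a NUL and can read past the end. A sketch of the hazard and the bounded replacement:

	char eid[SMC_MAX_EID_LEN];		/* may use all bytes, no '\0' */
	char out[SMC_MAX_EID_LEN + 1];

	snprintf(out, sizeof(out), "%s", eid);	/* overreads: scans eid for '\0' */

	memcpy(out, eid, SMC_MAX_EID_LEN);	/* bounded copy instead */
	out[SMC_MAX_EID_LEN] = '\0';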
@@ -252,7 +253,8 @@ int smc_nl_dump_seid(struct sk_buff *skb, struct netlink_callback *cb)
goto end;
smc_ism_get_system_eid(&seid);
- snprintf(seid_str, sizeof(seid_str), "%s", seid);
+ memcpy(seid_str, seid, SMC_MAX_EID_LEN);
+ seid_str[SMC_MAX_EID_LEN] = 0;
if (nla_put_string(skb, SMC_NLA_SEID_ENTRY, seid_str))
goto err;
read_lock(&smc_clc_eid_table.lock);
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 676cb2333d3c..31db7438857c 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -214,8 +214,11 @@ again:
sk->sk_state = SMC_CLOSED;
sk->sk_state_change(sk); /* wake up accept */
if (smc->clcsock && smc->clcsock->sk) {
- smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
+ write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
+ smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready,
+ &smc->clcsk_data_ready);
smc->clcsock->sk->sk_user_data = NULL;
+ write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
}
smc_close_cleanup_listen(sk);
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 7984f8883472..7055ed10e316 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -311,8 +311,9 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
if (!strncmp(ibdev->ibdev->name, ib_name,
sizeof(ibdev->ibdev->name)) ||
- !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name,
- IB_DEVICE_NAME_MAX - 1)) {
+ (ibdev->ibdev->dev.parent &&
+ !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name,
+ IB_DEVICE_NAME_MAX - 1))) {
goto out;
}
}
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 297c49855038..5b59e2103526 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -1231,6 +1231,8 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
dr->daddr = rqstp->rq_daddr;
dr->argslen = rqstp->rq_arg.len >> 2;
dr->xprt_hlen = rqstp->rq_xprt_hlen;
+ dr->xprt_ctxt = rqstp->rq_xprt_ctxt;
+ rqstp->rq_xprt_ctxt = NULL;
/* back up head to the start of the buffer and copy */
skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
@@ -1269,6 +1271,7 @@ static noinline int svc_deferred_recv(struct svc_rqst *rqstp)
rqstp->rq_xprt_hlen = dr->xprt_hlen;
rqstp->rq_daddr = dr->daddr;
rqstp->rq_respages = rqstp->rq_pages;
+ rqstp->rq_xprt_ctxt = dr->xprt_ctxt;
svc_xprt_received(rqstp->rq_xprt);
return (dr->argslen<<2) - dr->xprt_hlen;
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index cf76a6ad127b..864131a9fc6e 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -831,7 +831,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
goto out_err;
if (ret == 0)
goto out_drop;
- rqstp->rq_xprt_hlen = ret;
+ rqstp->rq_xprt_hlen = 0;
if (svc_rdma_is_reverse_direction_reply(xprt, ctxt))
goto out_backchannel;
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 12f7b56771d9..af875ad4a822 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -483,11 +483,13 @@ handle_error:
copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
copy = min_t(size_t, copy, (max_open_record_len - record->len));
- rc = tls_device_copy_data(page_address(pfrag->page) +
- pfrag->offset, copy, msg_iter);
- if (rc)
- goto handle_error;
- tls_append_frag(record, pfrag, copy);
+ if (copy) {
+ rc = tls_device_copy_data(page_address(pfrag->page) +
+ pfrag->offset, copy, msg_iter);
+ if (rc)
+ goto handle_error;
+ tls_append_frag(record, pfrag, copy);
+ }
size -= copy;
if (!size) {
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ee1c2b6b6971..21e808fcb676 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -528,7 +528,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
.len = IEEE80211_MAX_MESH_ID_LEN },
[NL80211_ATTR_MPATH_NEXT_HOP] = NLA_POLICY_ETH_ADDR_COMPAT,
- [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
+ /* allow 3 for NUL-termination; we used to declare this NLA_STRING */
+ [NL80211_ATTR_REG_ALPHA2] = NLA_POLICY_RANGE(NLA_BINARY, 2, 3),
[NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
[NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 },
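
The relaxed policy keeps existing userspace working: tools written against the old NLA_STRING declaration send a trailing NUL, while strict binary senders pass exactly two bytes, and the 2..3 range accepts both. A hedged libnl sketch, assuming a prepared struct nl_msg *msg:

	nla_put(msg, NL80211_ATTR_REG_ALPHA2, 2, "US");	/* bare 2-byte country code */
	nla_put(msg, NL80211_ATTR_REG_ALPHA2, 3, "US");	/* 3 bytes incl. the NUL */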
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index b2fdac96bab0..4a6d86432910 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -2018,11 +2018,13 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
/* this is a nontransmitting bss, we need to add it to
* transmitting bss' list if it is not there
*/
+ spin_lock_bh(&rdev->bss_lock);
if (cfg80211_add_nontrans_list(non_tx_data->tx_bss,
&res->pub)) {
if (__cfg80211_unlink_bss(rdev, res))
rdev->bss_generation++;
}
+ spin_unlock_bh(&rdev->bss_lock);
}
trace_cfg80211_return_bss(&res->pub);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 2c34caee0fd1..3a9348030e20 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -639,7 +639,7 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
if (sk_can_busy_loop(sk))
sk_busy_loop(sk, 1); /* only support non-blocking sockets */
- if (xsk_no_wakeup(sk))
+ if (xs->zc && xsk_no_wakeup(sk))
return 0;
pool = xs->pool;
@@ -967,6 +967,19 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
xp_get_pool(umem_xs->pool);
xs->pool = umem_xs->pool;
+
+ /* If the underlying shared umem was created without a Tx
+ * ring, allocate the Tx descs array that the Tx batching API
+ * utilizes.
+ */
+ if (xs->tx && !xs->pool->tx_descs) {
+ err = xp_alloc_tx_descs(xs->pool, xs);
+ if (err) {
+ xp_put_pool(xs->pool);
+ sockfd_put(sock);
+ goto out_unlock;
+ }
+ }
}
xdp_get_umem(umem_xs->umem);
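
This path is reached when a second socket that has a Tx ring binds with XDP_SHARED_UMEM to a umem whose original owner had none, so the shared pool was created without tx_descs. A hedged userspace sketch of such a bind, with ifindex, queue and umem_sock_fd as placeholders:

	struct sockaddr_xdp sxdp = {
		.sxdp_family = AF_XDP,
		.sxdp_flags = XDP_SHARED_UMEM,
		.sxdp_ifindex = ifindex,
		.sxdp_queue_id = queue,
		.sxdp_shared_umem_fd = umem_sock_fd,	/* fd of the umem owner */
	};

	if (bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0)
		perror("bind");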
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index af040ffa14ff..87bdd71c7bb6 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -42,6 +42,16 @@ void xp_destroy(struct xsk_buff_pool *pool)
kvfree(pool);
}
+int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs)
+{
+ pool->tx_descs = kvcalloc(xs->tx->nentries, sizeof(*pool->tx_descs),
+ GFP_KERNEL);
+ if (!pool->tx_descs)
+ return -ENOMEM;
+
+ return 0;
+}
+
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
struct xdp_umem *umem)
{
@@ -59,11 +69,9 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
if (!pool->heads)
goto out;
- if (xs->tx) {
- pool->tx_descs = kcalloc(xs->tx->nentries, sizeof(*pool->tx_descs), GFP_KERNEL);
- if (!pool->tx_descs)
+ if (xs->tx)
+ if (xp_alloc_tx_descs(pool, xs))
goto out;
- }
pool->chunk_mask = ~((u64)umem->chunk_size - 1);
pool->addrs_cnt = umem->size;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 19aa994f5d2c..00bd0ecff5a1 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2593,12 +2593,14 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
__u32 mark = 0;
+ int oif;
if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
family = xfrm[i]->props.family;
- dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
+ oif = fl->flowi_oif ? : fl->flowi_l3mdev;
+ dst = xfrm_dst_lookup(xfrm[i], tos, oif,
&saddr, &daddr, family, mark);
err = PTR_ERR(dst);
if (IS_ERR(dst))
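
The `?:` form here is the GNU conditional with the middle operand omitted: it evaluates fl->flowi_oif once and falls back to the second operand when the first is zero, equivalent to:

	int oif = fl->flowi_oif ? fl->flowi_oif : fl->flowi_l3mdev;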
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 9717e6f6fb31..33c1ed581522 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -231,7 +231,7 @@ objtool_args = \
$(if $(part-of-module), --module) \
$(if $(CONFIG_X86_KERNEL_IBT), --lto --ibt) \
$(if $(CONFIG_FRAME_POINTER),, --no-fp) \
- $(if $(CONFIG_GCOV_KERNEL)$(CONFIG_LTO_CLANG), --no-unreachable)\
+ $(if $(CONFIG_GCOV_KERNEL), --no-unreachable) \
$(if $(CONFIG_RETPOLINE), --retpoline) \
$(if $(CONFIG_X86_SMAP), --uaccess) \
$(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount) \
diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
index 589454bce930..8425da41de0d 100644
--- a/scripts/gcc-plugins/latent_entropy_plugin.c
+++ b/scripts/gcc-plugins/latent_entropy_plugin.c
@@ -86,25 +86,31 @@ static struct plugin_info latent_entropy_plugin_info = {
.help = "disable\tturn off latent entropy instrumentation\n",
};
-static unsigned HOST_WIDE_INT seed;
-/*
- * get_random_seed() (this is a GCC function) generates the seed.
- * This is a simple random generator without any cryptographic security because
- * the entropy doesn't come from here.
- */
+static unsigned HOST_WIDE_INT deterministic_seed;
+static unsigned HOST_WIDE_INT rnd_buf[32];
+static size_t rnd_idx = ARRAY_SIZE(rnd_buf);
+static int urandom_fd = -1;
+
static unsigned HOST_WIDE_INT get_random_const(void)
{
- unsigned int i;
- unsigned HOST_WIDE_INT ret = 0;
-
- for (i = 0; i < 8 * sizeof(ret); i++) {
- ret = (ret << 1) | (seed & 1);
- seed >>= 1;
- if (ret & 1)
- seed ^= 0xD800000000000000ULL;
+ if (deterministic_seed) {
+ unsigned HOST_WIDE_INT w = deterministic_seed;
+ w ^= w << 13;
+ w ^= w >> 7;
+ w ^= w << 17;
+ deterministic_seed = w;
+ return deterministic_seed;
}
- return ret;
+ if (urandom_fd < 0) {
+ urandom_fd = open("/dev/urandom", O_RDONLY);
+ gcc_assert(urandom_fd >= 0);
+ }
+ if (rnd_idx >= ARRAY_SIZE(rnd_buf)) {
+ gcc_assert(read(urandom_fd, rnd_buf, sizeof(rnd_buf)) == sizeof(rnd_buf));
+ rnd_idx = 0;
+ }
+ return rnd_buf[rnd_idx++];
}
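
The deterministic branch is a plain xorshift64 generator, which is why it is gated on a nonzero seed: zero is a fixed point of xorshift and would reproduce itself forever. A standalone sketch of the same step:

	#include <stdint.h>

	static uint64_t xorshift64(uint64_t *state)	/* *state must be nonzero */
	{
		uint64_t w = *state;

		w ^= w << 13;
		w ^= w >> 7;
		w ^= w << 17;
		return *state = w;
	}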
static tree tree_get_random_const(tree type)
@@ -537,8 +543,6 @@ static void latent_entropy_start_unit(void *gcc_data __unused,
tree type, id;
int quals;
- seed = get_random_seed(false);
-
if (in_lto_p)
return;
@@ -573,6 +577,12 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
const struct plugin_argument * const argv = plugin_info->argv;
int i;
+ /*
+ * Call get_random_seed() with noinit=true, so that this returns
+ * 0 in the case where no seed has been passed via -frandom-seed.
+ */
+ deterministic_seed = get_random_seed(true);
+
static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = {
{
.base = &latent_entropy_decl,
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 20f44504a644..9361a1ef02c9 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -140,7 +140,7 @@ objtool_link()
if ! is_enabled CONFIG_FRAME_POINTER; then
objtoolopt="${objtoolopt} --no-fp"
fi
- if is_enabled CONFIG_GCOV_KERNEL || is_enabled CONFIG_LTO_CLANG; then
+ if is_enabled CONFIG_GCOV_KERNEL; then
objtoolopt="${objtoolopt} --no-unreachable"
fi
if is_enabled CONFIG_RETPOLINE; then
diff --git a/sound/core/init.c b/sound/core/init.c
index 31ba7024e3ad..726a8353201f 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -209,6 +209,12 @@ static void __snd_card_release(struct device *dev, void *data)
* snd_card_register(), the very first devres action to call snd_card_free()
* is added automatically. In that way, the resource disconnection is assured
* at first, then released in the expected order.
+ *
+ * If an error happens in the probe before snd_card_register() is called and
+ * other devres resources have been acquired, the card must be freed manually
+ * via a snd_card_free() call in the error path; otherwise the devres call
+ * order can lead to a use-after-free. The snd_card_free_on_error() helper
+ * makes this easier to handle.
*/
int snd_devm_card_new(struct device *parent, int idx, const char *xid,
struct module *module, size_t extra_size,
@@ -235,6 +241,28 @@ int snd_devm_card_new(struct device *parent, int idx, const char *xid,
}
EXPORT_SYMBOL_GPL(snd_devm_card_new);
+/**
+ * snd_card_free_on_error - a small helper for handling devm probe errors
+ * @dev: the managed device object
+ * @ret: the return code from the probe callback
+ *
+ * This function performs the explicit snd_card_free() call when the probe
+ * callback returns an error. It is a small helper that simplifies error
+ * handling with managed devices.
+ */
+int snd_card_free_on_error(struct device *dev, int ret)
+{
+ struct snd_card *card;
+
+ if (!ret)
+ return 0;
+ card = devres_find(dev, __snd_card_release, NULL, NULL);
+ if (card)
+ snd_card_free(card);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_card_free_on_error);
+
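
The conversions in the sound/ drivers below all follow the same shape: the original probe body moves into a __-prefixed helper that simply returns on error, and a thin wrapper routes the result through snd_card_free_on_error(). A hedged sketch with hypothetical names:

	static int __my_card_probe(struct pci_dev *pci,
				   const struct pci_device_id *id)
	{
		int err;

		err = pcim_enable_device(pci);	/* any devres-backed setup */
		if (err < 0)
			return err;		/* the wrapper frees the card */
		/* create and register the card as before */
		return 0;
	}

	static int my_card_probe(struct pci_dev *pci,
				 const struct pci_device_id *id)
	{
		return snd_card_free_on_error(&pci->dev, __my_card_probe(pci, id));
	}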
static int snd_card_init(struct snd_card *card, struct device *parent,
int idx, const char *xid, struct module *module,
size_t extra_size)
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 6fd763d4d15b..15dc7160ba34 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -499,6 +499,10 @@ static const struct snd_malloc_ops snd_dma_wc_ops = {
};
#endif /* CONFIG_X86 */
+#ifdef CONFIG_SND_DMA_SGBUF
+static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
+#endif
+
/*
* Non-contiguous pages allocator
*/
@@ -509,8 +513,18 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
DEFAULT_GFP, 0);
- if (!sgt)
+ if (!sgt) {
+#ifdef CONFIG_SND_DMA_SGBUF
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
+ dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
+ else
+ dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
+ return snd_dma_sg_fallback_alloc(dmab, size);
+#else
return NULL;
+#endif
+ }
+
dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
sg_dma_address(sgt->sgl));
p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
@@ -633,6 +647,8 @@ static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
if (!p)
return NULL;
+ if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
+ return p;
for_each_sgtable_page(sgt, &iter, 0)
set_memory_wc(sg_wc_address(&iter), 1);
return p;
@@ -665,6 +681,95 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
.get_page = snd_dma_noncontig_get_page,
.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};
+
+/* Fallback SG-buffer allocations for x86 */
+struct snd_dma_sg_fallback {
+ size_t count;
+ struct page **pages;
+ dma_addr_t *addrs;
+};
+
+static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
+ struct snd_dma_sg_fallback *sgbuf)
+{
+ size_t i;
+
+ if (sgbuf->count && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+ set_pages_array_wb(sgbuf->pages, sgbuf->count);
+ for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
+ dma_free_coherent(dmab->dev.dev, PAGE_SIZE,
+ page_address(sgbuf->pages[i]),
+ sgbuf->addrs[i]);
+ kvfree(sgbuf->pages);
+ kvfree(sgbuf->addrs);
+ kfree(sgbuf);
+}
+
+static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ struct snd_dma_sg_fallback *sgbuf;
+ struct page **pages;
+ size_t i, count;
+ void *p;
+
+ sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
+ if (!sgbuf)
+ return NULL;
+ count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ goto error;
+ sgbuf->pages = pages;
+ sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
+ if (!sgbuf->addrs)
+ goto error;
+
+ for (i = 0; i < count; sgbuf->count++, i++) {
+ p = dma_alloc_coherent(dmab->dev.dev, PAGE_SIZE,
+ &sgbuf->addrs[i], DEFAULT_GFP);
+ if (!p)
+ goto error;
+ sgbuf->pages[i] = virt_to_page(p);
+ }
+
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+ set_pages_array_wc(pages, count);
+ p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
+ if (!p)
+ goto error;
+ dmab->private_data = sgbuf;
+ return p;
+
+ error:
+ __snd_dma_sg_fallback_free(dmab, sgbuf);
+ return NULL;
+}
+
+static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
+{
+ vunmap(dmab->area);
+ __snd_dma_sg_fallback_free(dmab, dmab->private_data);
+}
+
+static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
+
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+ area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+ return vm_map_pages(area, sgbuf->pages, sgbuf->count);
+}
+
+static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
+ .alloc = snd_dma_sg_fallback_alloc,
+ .free = snd_dma_sg_fallback_free,
+ .mmap = snd_dma_sg_fallback_mmap,
+ /* reuse vmalloc helpers */
+ .get_addr = snd_dma_vmalloc_get_addr,
+ .get_page = snd_dma_vmalloc_get_page,
+ .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
#endif /* CONFIG_SND_DMA_SGBUF */
/*
@@ -736,6 +841,10 @@ static const struct snd_malloc_ops *dma_ops[] = {
#ifdef CONFIG_GENERIC_ALLOCATOR
[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
+#ifdef CONFIG_SND_DMA_SGBUF
+ [SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
+ [SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
+#endif
#endif /* CONFIG_HAS_DMA */
};
diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c
index 4866aed97aac..5588b6a1ee8b 100644
--- a/sound/core/pcm_misc.c
+++ b/sound/core/pcm_misc.c
@@ -433,7 +433,7 @@ int snd_pcm_format_set_silence(snd_pcm_format_t format, void *data, unsigned int
return 0;
width = pcm_formats[(INT)format].phys; /* physical width */
pat = pcm_formats[(INT)format].silence;
- if (! width)
+ if (!width || !pat)
return -EINVAL;
/* signed or 1 byte data */
if (pcm_formats[(INT)format].signd == 1 || width <= 8) {
diff --git a/sound/drivers/mtpav.c b/sound/drivers/mtpav.c
index 11235baaf6fa..f212f233ea61 100644
--- a/sound/drivers/mtpav.c
+++ b/sound/drivers/mtpav.c
@@ -693,8 +693,6 @@ static int snd_mtpav_probe(struct platform_device *dev)
mtp_card->outmidihwport = 0xffffffff;
timer_setup(&mtp_card->timer, snd_mtpav_output_timer, 0);
- card->private_free = snd_mtpav_free;
-
err = snd_mtpav_get_RAWMIDI(mtp_card);
if (err < 0)
return err;
@@ -716,6 +714,8 @@ static int snd_mtpav_probe(struct platform_device *dev)
if (err < 0)
return err;
+ card->private_free = snd_mtpav_free;
+
platform_set_drvdata(dev, card);
printk(KERN_INFO "Motu MidiTimePiece on parallel port irq: %d ioport: 0x%lx\n", irq, port);
return 0;
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index efe810af28c5..3f35972e1cf7 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -116,16 +116,24 @@ static int i915_component_master_match(struct device *dev, int subcomponent,
return 0;
}
-/* check whether intel graphics is present */
-static bool i915_gfx_present(void)
+/* check whether Intel graphics is present and reachable */
+static int i915_gfx_present(struct pci_dev *hdac_pci)
{
- static const struct pci_device_id ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID),
- .class = PCI_BASE_CLASS_DISPLAY << 16,
- .class_mask = 0xff << 16 },
- {}
- };
- return pci_dev_present(ids);
+ unsigned int class = PCI_BASE_CLASS_DISPLAY << 16;
+ struct pci_dev *display_dev = NULL;
+ bool match = false;
+
+ do {
+ display_dev = pci_get_class(class, display_dev);
+
+ if (display_dev && display_dev->vendor == PCI_VENDOR_ID_INTEL &&
+ connectivity_check(display_dev, hdac_pci)) {
+ pci_dev_put(display_dev);
+ match = true;
+ }
+ } while (!match && display_dev);
+
+ return match;
}
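
pci_get_class() hides the reference counting of the scan: each call drops the reference on the device passed in and returns the next match with its refcount raised, so only a device that terminates the loop early still holds a reference the caller must drop. A minimal sketch, with is_wanted() as a hypothetical predicate:

	struct pci_dev *pdev = NULL;

	while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev))) {
		if (is_wanted(pdev))
			break;		/* pdev still holds a reference */
	}
	pci_dev_put(pdev);		/* put the final hit; put(NULL) is a no-op */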
/**
@@ -145,7 +153,7 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
struct drm_audio_component *acomp;
int err;
- if (!i915_gfx_present())
+ if (!i915_gfx_present(to_pci_dev(bus->dev)))
return -ENODEV;
err = snd_hdac_acomp_init(bus, NULL,
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index 70fd8b13938e..a8fe01764b25 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -390,26 +390,49 @@ static const struct config_entry config_table[] = {
/* Alder Lake */
#if IS_ENABLED(CONFIG_SND_SOC_SOF_ALDERLAKE)
+ /* Alderlake-S */
{
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x7ad0,
},
+ /* RaptorLake-S */
{
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
- .device = 0x51c8,
+ .device = 0x7a50,
},
+ /* Alderlake-P */
{
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
- .device = 0x51cc,
+ .device = 0x51c8,
},
{
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x51cd,
},
+ /* Alderlake-PS */
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ .device = 0x51c9,
+ },
+ /* Alderlake-M */
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ .device = 0x51cc,
+ },
+ /* Alderlake-N */
{
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x54c8,
},
+ /* RaptorLake-P */
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ .device = 0x51ca,
+ },
+ {
+ .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+ .device = 0x51cb,
+ },
#endif
};
diff --git a/sound/isa/galaxy/galaxy.c b/sound/isa/galaxy/galaxy.c
index ea001c80149d..3164eb8510fa 100644
--- a/sound/isa/galaxy/galaxy.c
+++ b/sound/isa/galaxy/galaxy.c
@@ -478,7 +478,7 @@ static void snd_galaxy_free(struct snd_card *card)
galaxy_set_config(galaxy, galaxy->config);
}
-static int snd_galaxy_probe(struct device *dev, unsigned int n)
+static int __snd_galaxy_probe(struct device *dev, unsigned int n)
{
struct snd_galaxy *galaxy;
struct snd_wss *chip;
@@ -598,6 +598,11 @@ static int snd_galaxy_probe(struct device *dev, unsigned int n)
return 0;
}
+static int snd_galaxy_probe(struct device *dev, unsigned int n)
+{
+ return snd_card_free_on_error(dev, __snd_galaxy_probe(dev, n));
+}
+
static struct isa_driver snd_galaxy_driver = {
.match = snd_galaxy_match,
.probe = snd_galaxy_probe,
diff --git a/sound/isa/sc6000.c b/sound/isa/sc6000.c
index 26ab7ff80768..60398fced046 100644
--- a/sound/isa/sc6000.c
+++ b/sound/isa/sc6000.c
@@ -537,7 +537,7 @@ static void snd_sc6000_free(struct snd_card *card)
sc6000_setup_board(vport, 0);
}
-static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
+static int __snd_sc6000_probe(struct device *devptr, unsigned int dev)
{
static const int possible_irqs[] = { 5, 7, 9, 10, 11, -1 };
static const int possible_dmas[] = { 1, 3, 0, -1 };
@@ -662,6 +662,11 @@ static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
return 0;
}
+static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
+{
+ return snd_card_free_on_error(devptr, __snd_sc6000_probe(devptr, dev));
+}
+
static struct isa_driver snd_sc6000_driver = {
.match = snd_sc6000_match,
.probe = snd_sc6000_probe,
diff --git a/sound/oss/dmasound/dmasound.h b/sound/oss/dmasound/dmasound.h
index c1c52b479da2..ad8ce6a1c25c 100644
--- a/sound/oss/dmasound/dmasound.h
+++ b/sound/oss/dmasound/dmasound.h
@@ -88,11 +88,7 @@ static inline int ioctl_return(int __user *addr, int value)
*/
extern int dmasound_init(void);
-#ifdef MODULE
extern void dmasound_deinit(void);
-#else
-#define dmasound_deinit() do { } while (0)
-#endif
/* description of the set-up applies to either hard or soft settings */
@@ -114,9 +110,7 @@ typedef struct {
void *(*dma_alloc)(unsigned int, gfp_t);
void (*dma_free)(void *, unsigned int);
int (*irqinit)(void);
-#ifdef MODULE
void (*irqcleanup)(void);
-#endif
void (*init)(void);
void (*silence)(void);
int (*setFormat)(int);
diff --git a/sound/oss/dmasound/dmasound_core.c b/sound/oss/dmasound/dmasound_core.c
index 0c95828ac0b1..164335d3c200 100644
--- a/sound/oss/dmasound/dmasound_core.c
+++ b/sound/oss/dmasound/dmasound_core.c
@@ -206,12 +206,10 @@ module_param(writeBufSize, int, 0);
MODULE_LICENSE("GPL");
-#ifdef MODULE
static int sq_unit = -1;
static int mixer_unit = -1;
static int state_unit = -1;
static int irq_installed;
-#endif /* MODULE */
/* control over who can modify resources shared between play/record */
static fmode_t shared_resource_owner;
@@ -391,9 +389,6 @@ static const struct file_operations mixer_fops =
static void mixer_init(void)
{
-#ifndef MODULE
- int mixer_unit;
-#endif
mixer_unit = register_sound_mixer(&mixer_fops, -1);
if (mixer_unit < 0)
return;
@@ -1171,9 +1166,6 @@ static const struct file_operations sq_fops =
static int sq_init(void)
{
const struct file_operations *fops = &sq_fops;
-#ifndef MODULE
- int sq_unit;
-#endif
sq_unit = register_sound_dsp(fops, -1);
if (sq_unit < 0) {
@@ -1366,9 +1358,6 @@ static const struct file_operations state_fops = {
static int state_init(void)
{
-#ifndef MODULE
- int state_unit;
-#endif
state_unit = register_sound_special(&state_fops, SND_DEV_STATUS);
if (state_unit < 0)
return state_unit ;
@@ -1386,10 +1375,9 @@ static int state_init(void)
int dmasound_init(void)
{
int res ;
-#ifdef MODULE
+
if (irq_installed)
return -EBUSY;
-#endif
/* Set up sound queue, /dev/audio and /dev/dsp. */
@@ -1408,9 +1396,7 @@ int dmasound_init(void)
printk(KERN_ERR "DMA sound driver: Interrupt initialization failed\n");
return -ENODEV;
}
-#ifdef MODULE
irq_installed = 1;
-#endif
printk(KERN_INFO "%s DMA sound driver rev %03d installed\n",
dmasound.mach.name, (DMASOUND_CORE_REVISION<<4) +
@@ -1424,8 +1410,6 @@ int dmasound_init(void)
return 0;
}
-#ifdef MODULE
-
void dmasound_deinit(void)
{
if (irq_installed) {
@@ -1444,9 +1428,7 @@ void dmasound_deinit(void)
unregister_sound_dsp(sq_unit);
}
-#else /* !MODULE */
-
-static int dmasound_setup(char *str)
+static int __maybe_unused dmasound_setup(char *str)
{
int ints[6], size;
@@ -1489,8 +1471,6 @@ static int dmasound_setup(char *str)
__setup("dmasound=", dmasound_setup);
-#endif /* !MODULE */
-
/*
* Conversion tables
*/
@@ -1577,9 +1557,7 @@ char dmasound_alaw2dma8[] = {
EXPORT_SYMBOL(dmasound);
EXPORT_SYMBOL(dmasound_init);
-#ifdef MODULE
EXPORT_SYMBOL(dmasound_deinit);
-#endif
EXPORT_SYMBOL(dmasound_write_sq);
EXPORT_SYMBOL(dmasound_catchRadius);
#ifdef HAS_8BIT_TABLES
diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c
index bba4dae8dcc7..50e30704bf6f 100644
--- a/sound/pci/ad1889.c
+++ b/sound/pci/ad1889.c
@@ -844,8 +844,8 @@ snd_ad1889_create(struct snd_card *card, struct pci_dev *pci)
}
static int
-snd_ad1889_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+__snd_ad1889_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
int err;
static int devno;
@@ -904,6 +904,12 @@ snd_ad1889_probe(struct pci_dev *pci,
return 0;
}
+static int snd_ad1889_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_ad1889_probe(pci, pci_id));
+}
+
static const struct pci_device_id snd_ad1889_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ANALOG_DEVICES, PCI_DEVICE_ID_AD1889JS) },
{ 0, },
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index 92eb59db106d..2378a39abaeb 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -2124,8 +2124,8 @@ static int snd_ali_create(struct snd_card *card,
return 0;
}
-static int snd_ali_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_ali_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct snd_ali *codec;
@@ -2170,6 +2170,12 @@ static int snd_ali_probe(struct pci_dev *pci,
return 0;
}
+static int snd_ali_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_ali_probe(pci, pci_id));
+}
+
static struct pci_driver ali5451_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_ali_ids,
diff --git a/sound/pci/als300.c b/sound/pci/als300.c
index b86565dcdbe4..c70aff060120 100644
--- a/sound/pci/als300.c
+++ b/sound/pci/als300.c
@@ -708,7 +708,7 @@ static int snd_als300_probe(struct pci_dev *pci,
err = snd_als300_create(card, pci, chip_type);
if (err < 0)
- return err;
+ goto error;
strcpy(card->driver, "ALS300");
if (chip->chip_type == DEVICE_ALS300_PLUS)
@@ -723,11 +723,15 @@ static int snd_als300_probe(struct pci_dev *pci,
err = snd_card_register(card);
if (err < 0)
- return err;
+ goto error;
pci_set_drvdata(pci, card);
dev++;
return 0;
+
+ error:
+ snd_card_free(card);
+ return err;
}
static struct pci_driver als300_driver = {
diff --git a/sound/pci/als4000.c b/sound/pci/als4000.c
index 535eccd124be..f33aeb692a11 100644
--- a/sound/pci/als4000.c
+++ b/sound/pci/als4000.c
@@ -806,8 +806,8 @@ static void snd_card_als4000_free( struct snd_card *card )
snd_als4000_free_gameport(acard);
}
-static int snd_card_als4000_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_card_als4000_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -930,6 +930,12 @@ static int snd_card_als4000_probe(struct pci_dev *pci,
return 0;
}
+static int snd_card_als4000_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_card_als4000_probe(pci, pci_id));
+}
+
#ifdef CONFIG_PM_SLEEP
static int snd_als4000_suspend(struct device *dev)
{
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index b8e035d5930d..43d01f1847ed 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -1572,8 +1572,8 @@ static int snd_atiixp_init(struct snd_card *card, struct pci_dev *pci)
}
-static int snd_atiixp_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_atiixp_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct atiixp *chip;
@@ -1623,6 +1623,12 @@ static int snd_atiixp_probe(struct pci_dev *pci,
return 0;
}
+static int snd_atiixp_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_atiixp_probe(pci, pci_id));
+}
+
static struct pci_driver atiixp_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_atiixp_ids,
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index 178dce8ef1e9..8864c4c3c7e1 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -1201,8 +1201,8 @@ static int snd_atiixp_init(struct snd_card *card, struct pci_dev *pci)
}
-static int snd_atiixp_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_atiixp_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct atiixp_modem *chip;
@@ -1247,6 +1247,12 @@ static int snd_atiixp_probe(struct pci_dev *pci,
return 0;
}
+static int snd_atiixp_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_atiixp_probe(pci, pci_id));
+}
+
static struct pci_driver atiixp_modem_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_atiixp_ids,
diff --git a/sound/pci/au88x0/au88x0.c b/sound/pci/au88x0/au88x0.c
index 342ef2a6655e..eb234153691b 100644
--- a/sound/pci/au88x0/au88x0.c
+++ b/sound/pci/au88x0/au88x0.c
@@ -193,7 +193,7 @@ snd_vortex_create(struct snd_card *card, struct pci_dev *pci)
// constructor -- see "Constructor" sub-section
static int
-snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -310,6 +310,12 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
return 0;
}
+static int
+snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_vortex_probe(pci, pci_id));
+}
+
// pci_driver definition
static struct pci_driver vortex_driver = {
.name = KBUILD_MODNAME,
diff --git a/sound/pci/aw2/aw2-alsa.c b/sound/pci/aw2/aw2-alsa.c
index d56f126d6fdd..29a4bcdec237 100644
--- a/sound/pci/aw2/aw2-alsa.c
+++ b/sound/pci/aw2/aw2-alsa.c
@@ -275,7 +275,7 @@ static int snd_aw2_probe(struct pci_dev *pci,
/* (3) Create main component */
err = snd_aw2_create(card, pci);
if (err < 0)
- return err;
+ goto error;
/* initialize mutex */
mutex_init(&chip->mtx);
@@ -294,13 +294,17 @@ static int snd_aw2_probe(struct pci_dev *pci,
/* (6) Register card instance */
err = snd_card_register(card);
if (err < 0)
- return err;
+ goto error;
/* (7) Set PCI driver data */
pci_set_drvdata(pci, card);
dev++;
return 0;
+
+ error:
+ snd_card_free(card);
+ return err;
}
/* open callback */
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index 089050470ff2..7f329dfc5404 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -2427,7 +2427,7 @@ snd_azf3328_create(struct snd_card *card,
}
static int
-snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -2520,6 +2520,12 @@ snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
return 0;
}
+static int
+snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_azf3328_probe(pci, pci_id));
+}
+
#ifdef CONFIG_PM_SLEEP
static inline void
snd_azf3328_suspend_regs(const struct snd_azf3328 *chip,
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index d23f93163841..621985bfee5d 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -805,8 +805,8 @@ static int snd_bt87x_detect_card(struct pci_dev *pci)
return SND_BT87X_BOARD_UNKNOWN;
}
-static int snd_bt87x_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_bt87x_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -889,6 +889,12 @@ static int snd_bt87x_probe(struct pci_dev *pci,
return 0;
}
+static int snd_bt87x_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_bt87x_probe(pci, pci_id));
+}
+
/* default entries for all Bt87x cards - it's not exported */
/* driver_data is set to 0 to call detection */
static const struct pci_device_id snd_bt87x_default_ids[] = {
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index 8577f9fa5ea6..cf1bac7a435f 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -1725,8 +1725,8 @@ static int snd_ca0106_midi(struct snd_ca0106 *chip, unsigned int channel)
}
-static int snd_ca0106_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_ca0106_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -1786,6 +1786,12 @@ static int snd_ca0106_probe(struct pci_dev *pci,
return 0;
}
+static int snd_ca0106_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_ca0106_probe(pci, pci_id));
+}
+
#ifdef CONFIG_PM_SLEEP
static int snd_ca0106_suspend(struct device *dev)
{
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index dab801d9d3b4..727db6d43391 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -3247,15 +3247,19 @@ static int snd_cmipci_probe(struct pci_dev *pci,
err = snd_cmipci_create(card, pci, dev);
if (err < 0)
- return err;
+ goto error;
err = snd_card_register(card);
if (err < 0)
- return err;
+ goto error;
pci_set_drvdata(pci, card);
dev++;
return 0;
+
+ error:
+ snd_card_free(card);
+ return err;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index e7367402b84a..0c9cadf7b3b8 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -1827,8 +1827,8 @@ static void snd_cs4281_opl3_command(struct snd_opl3 *opl3, unsigned short cmd,
spin_unlock_irqrestore(&opl3->reg_lock, flags);
}
-static int snd_cs4281_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_cs4281_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -1888,6 +1888,12 @@ static int snd_cs4281_probe(struct pci_dev *pci,
return 0;
}
+static int snd_cs4281_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_cs4281_probe(pci, pci_id));
+}
+
/*
* Power Management
*/
diff --git a/sound/pci/cs5535audio/cs5535audio.c b/sound/pci/cs5535audio/cs5535audio.c
index 499fa0148f9a..440b8f9b40c9 100644
--- a/sound/pci/cs5535audio/cs5535audio.c
+++ b/sound/pci/cs5535audio/cs5535audio.c
@@ -281,8 +281,8 @@ static int snd_cs5535audio_create(struct snd_card *card,
return 0;
}
-static int snd_cs5535audio_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_cs5535audio_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -331,6 +331,12 @@ static int snd_cs5535audio_probe(struct pci_dev *pci,
return 0;
}
+static int snd_cs5535audio_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_cs5535audio_probe(pci, pci_id));
+}
+
static struct pci_driver cs5535audio_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_cs5535audio_ids,
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 25b012ef5c3e..c70c3ac4e99a 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -1970,8 +1970,8 @@ static int snd_echo_create(struct snd_card *card,
}
/* constructor */
-static int snd_echo_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_echo_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -2139,6 +2139,11 @@ static int snd_echo_probe(struct pci_dev *pci,
return 0;
}
+static int snd_echo_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_echo_probe(pci, pci_id));
+}
#if defined(CONFIG_PM_SLEEP)
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index c49c44dc1082..89043392f3ec 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -1491,8 +1491,8 @@ static int snd_emu10k1x_midi(struct emu10k1x *emu)
return 0;
}
-static int snd_emu10k1x_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_emu10k1x_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -1554,6 +1554,12 @@ static int snd_emu10k1x_probe(struct pci_dev *pci,
return 0;
}
+static int snd_emu10k1x_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_emu10k1x_probe(pci, pci_id));
+}
+
// PCI IDs
static const struct pci_device_id snd_emu10k1x_ids[] = {
{ PCI_VDEVICE(CREATIVE, 0x0006), 0 }, /* Dell OEM version (EMU10K1) */
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 2651f0c64c06..94efe347a97a 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -2304,8 +2304,8 @@ static irqreturn_t snd_audiopci_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int snd_audiopci_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_audiopci_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -2369,6 +2369,12 @@ static int snd_audiopci_probe(struct pci_dev *pci,
return 0;
}
+static int snd_audiopci_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_audiopci_probe(pci, pci_id));
+}
+
static struct pci_driver ens137x_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_audiopci_ids,
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index 00b976f42a3d..e34ec6f89e7e 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -1716,8 +1716,8 @@ static int snd_es1938_mixer(struct es1938 *chip)
}
-static int snd_es1938_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_es1938_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -1796,6 +1796,12 @@ static int snd_es1938_probe(struct pci_dev *pci,
return 0;
}
+static int snd_es1938_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_es1938_probe(pci, pci_id));
+}
+
static struct pci_driver es1938_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_es1938_ids,
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 6a8a02a9ecf4..4a7e20bb11bc 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -2741,8 +2741,8 @@ static int snd_es1968_create(struct snd_card *card,
/*
*/
-static int snd_es1968_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_es1968_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -2848,6 +2848,12 @@ static int snd_es1968_probe(struct pci_dev *pci,
return 0;
}
+static int snd_es1968_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_es1968_probe(pci, pci_id));
+}
+
static struct pci_driver es1968_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_es1968_ids,
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index 9c22ff19e56d..62b3cb126c6d 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -1268,8 +1268,8 @@ static int snd_fm801_create(struct snd_card *card,
return 0;
}
-static int snd_card_fm801_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_card_fm801_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -1333,6 +1333,12 @@ static int snd_card_fm801_probe(struct pci_dev *pci,
return 0;
}
+static int snd_card_fm801_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_card_fm801_probe(pci, pci_id));
+}
+
#ifdef CONFIG_PM_SLEEP
static const unsigned char saved_regs[] = {
FM801_PCM_VOL, FM801_I2S_VOL, FM801_FM_VOL, FM801_REC_SRC,
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 3e086eebf88d..31fe41795571 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1395,7 +1395,7 @@ static int hdmi_find_pcm_slot(struct hdmi_spec *spec,
last_try:
/* the last try; check the empty slots in pins */
- for (i = 0; i < spec->num_nids; i++) {
+ for (i = 0; i < spec->pcm_used; i++) {
if (!test_bit(i, &spec->pcm_bitmap))
return i;
}
@@ -2325,7 +2325,9 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
* dev_num is the device entry number in a pin
*/
- if (codec->mst_no_extra_pcms)
+ if (spec->dyn_pcm_no_legacy && codec->mst_no_extra_pcms)
+ pcm_num = spec->num_cvts;
+ else if (codec->mst_no_extra_pcms)
pcm_num = spec->num_nids;
else
pcm_num = spec->num_nids + spec->dev_num - 1;
@@ -4551,6 +4553,7 @@ HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI", patch_i915_adlp_hdmi),
HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_adlp_hdmi),
+HDA_CODEC_ENTRY(0x8086281f, "Raptorlake-P HDMI", patch_i915_adlp_hdmi),
HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),
HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 4e12af24b4d3..4c0c593f3c0a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2619,6 +2619,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ SND_PCI_QUIRK(0x1558, 0x65f5, "Clevo PD50PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
@@ -7005,6 +7006,7 @@ enum {
ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE,
ALC287_FIXUP_LEGION_16ACHG6,
ALC287_FIXUP_CS35L41_I2C_2,
+ ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED,
ALC245_FIXUP_CS35L41_SPI_2,
ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED,
ALC245_FIXUP_CS35L41_SPI_4,
@@ -8768,6 +8770,12 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = cs35l41_fixup_i2c_two,
},
+ [ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cs35l41_fixup_i2c_two,
+ .chained = true,
+ .chain_id = ALC285_FIXUP_HP_MUTE_LED,
+ },
[ALC245_FIXUP_CS35L41_SPI_2] = {
.type = HDA_FIXUP_FUNC,
.v.func = cs35l41_fixup_spi_two,
@@ -9024,9 +9032,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8981, "HP Elite Dragonfly G3", ALC245_FIXUP_CS35L41_SPI_4),
SND_PCI_QUIRK(0x103c, 0x898e, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x898f, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x103c, 0x8991, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8991, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8992, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x103c, 0x8994, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8994, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8995, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x89a4, "HP ProBook 440 G9", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89a6, "HP ProBook 450 G9", ALC236_FIXUP_HP_GPIO_LED),
@@ -9162,6 +9170,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[57][0-9]RZ[Q]", ALC269_FIXUP_DMIC),
SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x866d, "Clevo NP5[05]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x867c, "Clevo NP7[01]PNP", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x867d, "Clevo NP7[01]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME),
@@ -9264,6 +9273,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x508b, "Thinkpad X12 Gen 1", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index f6275868877a..6fab2ad85bbe 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -2519,8 +2519,8 @@ static int snd_vt1724_create(struct snd_card *card,
*
*/
-static int snd_vt1724_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_vt1724_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -2662,6 +2662,12 @@ static int snd_vt1724_probe(struct pci_dev *pci,
return 0;
}
+static int snd_vt1724_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_vt1724_probe(pci, pci_id));
+}
+
#ifdef CONFIG_PM_SLEEP
static int snd_vt1724_suspend(struct device *dev)
{
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index a51032b3ac4d..ae285c0a629c 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -3109,8 +3109,8 @@ static int check_default_spdif_aclink(struct pci_dev *pci)
return 0;
}
-static int snd_intel8x0_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_intel8x0_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct intel8x0 *chip;
@@ -3189,6 +3189,12 @@ static int snd_intel8x0_probe(struct pci_dev *pci,
return 0;
}
+static int snd_intel8x0_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_intel8x0_probe(pci, pci_id));
+}
+
static struct pci_driver intel8x0_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_intel8x0_ids,
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
index 7de3cb2f17b5..2845cc006d0c 100644
--- a/sound/pci/intel8x0m.c
+++ b/sound/pci/intel8x0m.c
@@ -1178,8 +1178,8 @@ static struct shortname_table {
{ 0 },
};
-static int snd_intel8x0m_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_intel8x0m_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct intel8x0m *chip;
@@ -1225,6 +1225,12 @@ static int snd_intel8x0m_probe(struct pci_dev *pci,
return 0;
}
+static int snd_intel8x0m_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_intel8x0m_probe(pci, pci_id));
+}
+
static struct pci_driver intel8x0m_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_intel8x0m_ids,
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index 5c9e240ff6a9..33b4f95d65b3 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -2355,7 +2355,7 @@ snd_korg1212_probe(struct pci_dev *pci,
err = snd_korg1212_create(card, pci);
if (err < 0)
- return err;
+ goto error;
strcpy(card->driver, "korg1212");
strcpy(card->shortname, "korg1212");
@@ -2366,10 +2366,14 @@ snd_korg1212_probe(struct pci_dev *pci,
err = snd_card_register(card);
if (err < 0)
- return err;
+ goto error;
pci_set_drvdata(pci, card);
dev++;
return 0;
+
+ error:
+ snd_card_free(card);
+ return err;
}
static struct pci_driver korg1212_driver = {
diff --git a/sound/pci/lola/lola.c b/sound/pci/lola/lola.c
index 5269a1d396a5..1aa30e90b86a 100644
--- a/sound/pci/lola/lola.c
+++ b/sound/pci/lola/lola.c
@@ -637,8 +637,8 @@ static int lola_create(struct snd_card *card, struct pci_dev *pci, int dev)
return 0;
}
-static int lola_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __lola_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -687,6 +687,12 @@ static int lola_probe(struct pci_dev *pci,
return 0;
}
+static int lola_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __lola_probe(pci, pci_id));
+}
+
/* PCI IDs */
static const struct pci_device_id lola_ids[] = {
{ PCI_VDEVICE(DIGIGRAM, 0x0001) },
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index 168a1084f730..bd9b6148dd6f 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -1019,7 +1019,7 @@ static int snd_lx6464es_probe(struct pci_dev *pci,
err = snd_lx6464es_create(card, pci);
if (err < 0) {
dev_err(card->dev, "error during snd_lx6464es_create\n");
- return err;
+ goto error;
}
strcpy(card->driver, "LX6464ES");
@@ -1036,12 +1036,16 @@ static int snd_lx6464es_probe(struct pci_dev *pci,
err = snd_card_register(card);
if (err < 0)
- return err;
+ goto error;
dev_dbg(chip->card->dev, "initialization successful\n");
pci_set_drvdata(pci, card);
dev++;
return 0;
+
+ error:
+ snd_card_free(card);
+ return err;
}
static struct pci_driver lx6464es_driver = {
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index 056838ead21d..261850775c80 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -2637,7 +2637,7 @@ snd_m3_create(struct snd_card *card, struct pci_dev *pci,
/*
*/
static int
-snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -2702,6 +2702,12 @@ snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
return 0;
}
+static int
+snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_m3_probe(pci, pci_id));
+}
+
static struct pci_driver m3_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_m3_ids,
diff --git a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c
index c9c178504959..f99a1e96e923 100644
--- a/sound/pci/nm256/nm256.c
+++ b/sound/pci/nm256/nm256.c
@@ -1573,7 +1573,6 @@ snd_nm256_create(struct snd_card *card, struct pci_dev *pci)
chip->coeffs_current = 0;
snd_nm256_init_chip(chip);
- card->private_free = snd_nm256_free;
// pci_set_master(pci); /* needed? */
return 0;
@@ -1680,6 +1679,7 @@ static int snd_nm256_probe(struct pci_dev *pci,
err = snd_card_register(card);
if (err < 0)
return err;
+ card->private_free = snd_nm256_free;
pci_set_drvdata(pci, card);
return 0;
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c
index 4fb3f2484fdb..92ffe9dc20c5 100644
--- a/sound/pci/oxygen/oxygen_lib.c
+++ b/sound/pci/oxygen/oxygen_lib.c
@@ -576,7 +576,7 @@ static void oxygen_card_free(struct snd_card *card)
mutex_destroy(&chip->mutex);
}
-int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
+static int __oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
struct module *owner,
const struct pci_device_id *ids,
int (*get_model)(struct oxygen *chip,
@@ -701,6 +701,16 @@ int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
pci_set_drvdata(pci, card);
return 0;
}
+
+int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
+ struct module *owner,
+ const struct pci_device_id *ids,
+ int (*get_model)(struct oxygen *chip,
+ const struct pci_device_id *id))
+{
+ return snd_card_free_on_error(&pci->dev,
+ __oxygen_pci_probe(pci, index, id, owner, ids, get_model));
+}
EXPORT_SYMBOL(oxygen_pci_probe);
#ifdef CONFIG_PM_SLEEP
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index 5a987c683c41..b37c877c2c16 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -2023,7 +2023,7 @@ static void snd_riptide_joystick_remove(struct pci_dev *pci)
#endif
static int
-snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -2124,6 +2124,12 @@ snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
return 0;
}
+static int
+snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_card_riptide_probe(pci, pci_id));
+}
+
static struct pci_driver driver = {
.name = KBUILD_MODNAME,
.id_table = snd_riptide_ids,
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index 5b6bd9f0b2f7..9c0ac025e143 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -1875,7 +1875,7 @@ static void snd_rme32_card_free(struct snd_card *card)
}
static int
-snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
static int dev;
struct rme32 *rme32;
@@ -1927,6 +1927,12 @@ snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
return 0;
}
+static int
+snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_rme32_probe(pci, pci_id));
+}
+
static struct pci_driver rme32_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_rme32_ids,
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 8fc811504920..bccb7e0d3d11 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -2430,8 +2430,8 @@ static void snd_rme96_card_free(struct snd_card *card)
}
static int
-snd_rme96_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+__snd_rme96_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct rme96 *rme96;
@@ -2498,6 +2498,12 @@ snd_rme96_probe(struct pci_dev *pci,
return 0;
}
+static int snd_rme96_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_rme96_probe(pci, pci_id));
+}
+
static struct pci_driver rme96_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_rme96_ids,
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 96c12dfb24cf..3db641318d3a 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -5444,17 +5444,21 @@ static int snd_hdsp_probe(struct pci_dev *pci,
hdsp->pci = pci;
err = snd_hdsp_create(card, hdsp);
if (err)
- return err;
+ goto error;
strcpy(card->shortname, "Hammerfall DSP");
sprintf(card->longname, "%s at 0x%lx, irq %d", hdsp->card_name,
hdsp->port, hdsp->irq);
err = snd_card_register(card);
if (err)
- return err;
+ goto error;
pci_set_drvdata(pci, card);
dev++;
return 0;
+
+ error:
+ snd_card_free(card);
+ return err;
}
static struct pci_driver hdsp_driver = {
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index ff06ee82607c..fa1812e7a49d 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6895,7 +6895,7 @@ static int snd_hdspm_probe(struct pci_dev *pci,
err = snd_hdspm_create(card, hdspm);
if (err < 0)
- return err;
+ goto error;
if (hdspm->io_type != MADIface) {
snprintf(card->shortname, sizeof(card->shortname), "%s_%x",
@@ -6914,12 +6914,16 @@ static int snd_hdspm_probe(struct pci_dev *pci,
err = snd_card_register(card);
if (err < 0)
- return err;
+ goto error;
pci_set_drvdata(pci, card);
dev++;
return 0;
+
+ error:
+ snd_card_free(card);
+ return err;
}
static struct pci_driver hdspm_driver = {
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index 7755e19aa776..1d614fe89a6a 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -2572,7 +2572,7 @@ static int snd_rme9652_probe(struct pci_dev *pci,
rme9652->pci = pci;
err = snd_rme9652_create(card, rme9652, precise_ptr[dev]);
if (err)
- return err;
+ goto error;
strcpy(card->shortname, rme9652->card_name);
@@ -2580,10 +2580,14 @@ static int snd_rme9652_probe(struct pci_dev *pci,
card->shortname, rme9652->port, rme9652->irq);
err = snd_card_register(card);
if (err)
- return err;
+ goto error;
pci_set_drvdata(pci, card);
dev++;
return 0;
+
+ error:
+ snd_card_free(card);
+ return err;
}
static struct pci_driver rme9652_driver = {
diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c
index 0b722b0e0604..fabe393607f8 100644
--- a/sound/pci/sis7019.c
+++ b/sound/pci/sis7019.c
@@ -1331,8 +1331,8 @@ static int sis_chip_create(struct snd_card *card,
return 0;
}
-static int snd_sis7019_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_sis7019_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct sis7019 *sis;
@@ -1352,8 +1352,8 @@ static int snd_sis7019_probe(struct pci_dev *pci,
if (!codecs)
codecs = SIS_PRIMARY_CODEC_PRESENT;
- rc = snd_card_new(&pci->dev, index, id, THIS_MODULE,
- sizeof(*sis), &card);
+ rc = snd_devm_card_new(&pci->dev, index, id, THIS_MODULE,
+ sizeof(*sis), &card);
if (rc < 0)
return rc;
@@ -1386,6 +1386,12 @@ static int snd_sis7019_probe(struct pci_dev *pci,
return 0;
}
+static int snd_sis7019_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_sis7019_probe(pci, pci_id));
+}
+
static struct pci_driver sis7019_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_sis7019_ids,
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index c8c49881008f..f91cbf6eeca0 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -1387,8 +1387,8 @@ static int snd_sonicvibes_midi(struct sonicvibes *sonic,
return 0;
}
-static int snd_sonic_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_sonic_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
static int dev;
struct snd_card *card;
@@ -1459,6 +1459,12 @@ static int snd_sonic_probe(struct pci_dev *pci,
return 0;
}
+static int snd_sonic_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_sonic_probe(pci, pci_id));
+}
+
static struct pci_driver sonicvibes_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_sonic_ids,
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index 65514f7e42d7..361b83fd721e 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -2458,8 +2458,8 @@ static int check_dxs_list(struct pci_dev *pci, int revision)
return VIA_DXS_48K;
};
-static int snd_via82xx_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_via82xx_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct via82xx *chip;
@@ -2569,6 +2569,12 @@ static int snd_via82xx_probe(struct pci_dev *pci,
return 0;
}
+static int snd_via82xx_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_via82xx_probe(pci, pci_id));
+}
+
static struct pci_driver via82xx_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_via82xx_ids,
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
index 234f7fbed236..ca7f024bf8ec 100644
--- a/sound/pci/via82xx_modem.c
+++ b/sound/pci/via82xx_modem.c
@@ -1103,8 +1103,8 @@ static int snd_via82xx_create(struct snd_card *card,
}
-static int snd_via82xx_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
+static int __snd_via82xx_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
{
struct snd_card *card;
struct via82xx_modem *chip;
@@ -1157,6 +1157,12 @@ static int snd_via82xx_probe(struct pci_dev *pci,
return 0;
}
+static int snd_via82xx_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ return snd_card_free_on_error(&pci->dev, __snd_via82xx_probe(pci, pci_id));
+}
+
static struct pci_driver via82xx_modem_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_via82xx_modem_ids,
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index 33e43013ff77..0d639a33ad96 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -46,35 +46,6 @@
*/
#undef ENABLE_MIC_INPUT
-static struct clk *mclk;
-
-static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card,
- struct snd_soc_dapm_context *dapm,
- enum snd_soc_bias_level level)
-{
- static int mclk_on;
- int ret = 0;
-
- switch (level) {
- case SND_SOC_BIAS_ON:
- case SND_SOC_BIAS_PREPARE:
- if (!mclk_on)
- ret = clk_enable(mclk);
- if (ret == 0)
- mclk_on = 1;
- break;
-
- case SND_SOC_BIAS_OFF:
- case SND_SOC_BIAS_STANDBY:
- if (mclk_on)
- clk_disable(mclk);
- mclk_on = 0;
- break;
- }
-
- return ret;
-}
-
static const struct snd_soc_dapm_widget at91sam9g20ek_dapm_widgets[] = {
SND_SOC_DAPM_MIC("Int Mic", NULL),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
@@ -135,7 +106,6 @@ static struct snd_soc_card snd_soc_at91sam9g20ek = {
.owner = THIS_MODULE,
.dai_link = &at91sam9g20ek_dai,
.num_links = 1,
- .set_bias_level = at91sam9g20ek_set_bias_level,
.dapm_widgets = at91sam9g20ek_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(at91sam9g20ek_dapm_widgets),
@@ -148,7 +118,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *codec_np, *cpu_np;
- struct clk *pllb;
struct snd_soc_card *card = &snd_soc_at91sam9g20ek;
int ret;
@@ -162,31 +131,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
return -EINVAL;
}
- /*
- * Codec MCLK is supplied by PCK0 - set it up.
- */
- mclk = clk_get(NULL, "pck0");
- if (IS_ERR(mclk)) {
- dev_err(&pdev->dev, "Failed to get MCLK\n");
- ret = PTR_ERR(mclk);
- goto err;
- }
-
- pllb = clk_get(NULL, "pllb");
- if (IS_ERR(pllb)) {
- dev_err(&pdev->dev, "Failed to get PLLB\n");
- ret = PTR_ERR(pllb);
- goto err_mclk;
- }
- ret = clk_set_parent(mclk, pllb);
- clk_put(pllb);
- if (ret != 0) {
- dev_err(&pdev->dev, "Failed to set MCLK parent\n");
- goto err_mclk;
- }
-
- clk_set_rate(mclk, MCLK_RATE);
-
card->dev = &pdev->dev;
/* Parse device node info */
@@ -230,9 +174,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
return ret;
-err_mclk:
- clk_put(mclk);
- mclk = NULL;
err:
atmel_ssc_put_audio(0);
return ret;
@@ -242,8 +183,6 @@ static int at91sam9g20ek_audio_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
- clk_disable(mclk);
- mclk = NULL;
snd_soc_unregister_card(card);
atmel_ssc_put_audio(0);
diff --git a/sound/soc/codecs/cs35l41-lib.c b/sound/soc/codecs/cs35l41-lib.c
index e5a56bcbb223..aa6823fbd1a4 100644
--- a/sound/soc/codecs/cs35l41-lib.c
+++ b/sound/soc/codecs/cs35l41-lib.c
@@ -822,8 +822,8 @@ int cs35l41_otp_unpack(struct device *dev, struct regmap *regmap)
word_offset = otp_map_match->word_offset;
for (i = 0; i < otp_map_match->num_elements; i++) {
- dev_dbg(dev, "bitoffset= %d, word_offset=%d, bit_sum mod 32=%d\n",
- bit_offset, word_offset, bit_sum % 32);
+ dev_dbg(dev, "bitoffset= %d, word_offset=%d, bit_sum mod 32=%d otp_map[i].size = %d\n",
+ bit_offset, word_offset, bit_sum % 32, otp_map[i].size);
if (bit_offset + otp_map[i].size - 1 >= 32) {
otp_val = (otp_mem[word_offset] &
GENMASK(31, bit_offset)) >> bit_offset;
@@ -831,12 +831,14 @@ int cs35l41_otp_unpack(struct device *dev, struct regmap *regmap)
GENMASK(bit_offset + otp_map[i].size - 33, 0)) <<
(32 - bit_offset);
bit_offset += otp_map[i].size - 32;
- } else {
+ } else if (bit_offset + otp_map[i].size - 1 >= 0) {
otp_val = (otp_mem[word_offset] &
GENMASK(bit_offset + otp_map[i].size - 1, bit_offset)
) >> bit_offset;
bit_offset += otp_map[i].size;
- }
+ } else /* both bit_offset and otp_map[i].size are 0 */
+ otp_val = 0;
+
bit_sum += otp_map[i].size;
if (bit_offset == 32) {
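The unpack loop pulls each OTP field of otp_map[i].size bits out of a packed array of 32-bit words, stitching two words together when a field straddles a boundary. The new else-if additionally guards the degenerate case where bit_offset and the field size are both zero, for which GENMASK(bit_offset + size - 1, bit_offset) would be the invalid GENMASK(-1, 0). A self-contained sketch of the extraction, with a userspace GENMASK stand-in and a hypothetical read_bits() name:

#include <stdint.h>

#define GENMASK(h, l)	((~0u >> (31 - (h))) & (~0u << (l)))

/* read 'size' bits starting at 'bit' within mem[word], spilling into
 * mem[word + 1] when the field crosses the 32-bit boundary */
static uint32_t read_bits(const uint32_t *mem, unsigned int word,
			  unsigned int bit, unsigned int size)
{
	uint32_t val;

	if (size == 0)
		return 0;	/* the degenerate case guarded above */

	if (bit + size - 1 >= 32) {	/* field straddles two words */
		val  = (mem[word] & GENMASK(31, bit)) >> bit;
		val |= (mem[word + 1] & GENMASK(bit + size - 33, 0))
			<< (32 - bit);
	} else {
		val = (mem[word] & GENMASK(bit + size - 1, bit)) >> bit;
	}
	return val;
}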
diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c
index 6884ae505e33..3143f9cd7277 100644
--- a/sound/soc/codecs/lpass-rx-macro.c
+++ b/sound/soc/codecs/lpass-rx-macro.c
@@ -3566,12 +3566,16 @@ static int rx_macro_probe(struct platform_device *pdev)
return PTR_ERR(rx->pds);
base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto err;
+ }
rx->regmap = devm_regmap_init_mmio(dev, base, &rx_regmap_config);
- if (IS_ERR(rx->regmap))
- return PTR_ERR(rx->regmap);
+ if (IS_ERR(rx->regmap)) {
+ ret = PTR_ERR(rx->regmap);
+ goto err;
+ }
dev_set_drvdata(dev, rx);
@@ -3632,6 +3636,8 @@ err_mclk:
err_dcodec:
clk_disable_unprepare(rx->macro);
err:
+ lpass_macro_pds_exit(rx->pds);
+
return ret;
}
diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
index 714a411d5337..55503ba480bb 100644
--- a/sound/soc/codecs/lpass-tx-macro.c
+++ b/sound/soc/codecs/lpass-tx-macro.c
@@ -1828,8 +1828,10 @@ static int tx_macro_probe(struct platform_device *pdev)
return PTR_ERR(tx->pds);
base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto err;
+ }
/* Update defaults for lpass sc7280 */
if (of_device_is_compatible(np, "qcom,sc7280-lpass-tx-macro")) {
@@ -1846,8 +1848,10 @@ static int tx_macro_probe(struct platform_device *pdev)
}
tx->regmap = devm_regmap_init_mmio(dev, base, &tx_regmap_config);
- if (IS_ERR(tx->regmap))
- return PTR_ERR(tx->regmap);
+ if (IS_ERR(tx->regmap)) {
+ ret = PTR_ERR(tx->regmap);
+ goto err;
+ }
dev_set_drvdata(dev, tx);
@@ -1907,6 +1911,8 @@ err_mclk:
err_dcodec:
clk_disable_unprepare(tx->macro);
err:
+ lpass_macro_pds_exit(tx->pds);
+
return ret;
}
diff --git a/sound/soc/codecs/lpass-va-macro.c b/sound/soc/codecs/lpass-va-macro.c
index f3cb596058e0..d18b56e60433 100644
--- a/sound/soc/codecs/lpass-va-macro.c
+++ b/sound/soc/codecs/lpass-va-macro.c
@@ -1434,8 +1434,10 @@ static int va_macro_probe(struct platform_device *pdev)
va->dmic_clk_div = VA_MACRO_CLK_DIV_2;
} else {
ret = va_macro_validate_dmic_sample_rate(sample_rate, va);
- if (!ret)
- return -EINVAL;
+ if (!ret) {
+ ret = -EINVAL;
+ goto err;
+ }
}
base = devm_platform_ioremap_resource(pdev, 0);
@@ -1492,6 +1494,8 @@ err_mclk:
err_dcodec:
clk_disable_unprepare(va->macro);
err:
+ lpass_macro_pds_exit(va->pds);
+
return ret;
}
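All three LPASS macro codecs (rx above, tx and va here) shared the same leak: the power domains attached by lpass_macro_pds_init() were only released on the remove path, so any probe failure after that point kept them held. Routing every failure through the common err: label, which now ends in lpass_macro_pds_exit(), restores the usual unwind-in-reverse goto ladder, abridged here from the rx-macro hunk:

	rx->pds = lpass_macro_pds_init(dev);
	if (IS_ERR(rx->pds))
		return PTR_ERR(rx->pds);	/* nothing to unwind yet */

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto err;		/* must drop the power domains */
	}

	/* ... clocks, regmap, component registration ... */
	return 0;

err:
	lpass_macro_pds_exit(rx->pds);
	return ret;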
diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
index 9ad7fc0baf07..20a07c92b2fc 100644
--- a/sound/soc/codecs/msm8916-wcd-digital.c
+++ b/sound/soc/codecs/msm8916-wcd-digital.c
@@ -1206,9 +1206,16 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev)
dev_set_drvdata(dev, priv);
- return devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
+ ret = devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
msm8916_wcd_digital_dai,
ARRAY_SIZE(msm8916_wcd_digital_dai));
+ if (ret)
+ goto err_mclk;
+
+ return 0;
+
+err_mclk:
+ clk_disable_unprepare(priv->mclk);
err_clk:
clk_disable_unprepare(priv->ahbclk);
return ret;
diff --git a/sound/soc/codecs/rk817_codec.c b/sound/soc/codecs/rk817_codec.c
index 8fffe378618d..cce6f4e7992f 100644
--- a/sound/soc/codecs/rk817_codec.c
+++ b/sound/soc/codecs/rk817_codec.c
@@ -489,7 +489,7 @@ static int rk817_platform_probe(struct platform_device *pdev)
rk817_codec_parse_dt_property(&pdev->dev, rk817_codec_data);
- rk817_codec_data->mclk = clk_get(pdev->dev.parent, "mclk");
+ rk817_codec_data->mclk = devm_clk_get(pdev->dev.parent, "mclk");
if (IS_ERR(rk817_codec_data->mclk)) {
dev_dbg(&pdev->dev, "Unable to get mclk\n");
ret = -ENXIO;
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index be68d573a490..2b6c6d6b9771 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -1100,6 +1100,15 @@ void rt5682_jack_detect_handler(struct work_struct *work)
return;
}
+ if (rt5682->is_sdw) {
+ if (pm_runtime_status_suspended(rt5682->slave->dev.parent)) {
+ dev_dbg(&rt5682->slave->dev,
+ "%s: parent device is pm_runtime_status_suspended, skipping jack detection\n",
+ __func__);
+ return;
+ }
+ }
+
dapm = snd_soc_component_get_dapm(rt5682->component);
snd_soc_dapm_mutex_lock(dapm);
@@ -2822,14 +2831,11 @@ static int rt5682_bclk_set_rate(struct clk_hw *hw, unsigned long rate,
for_each_component_dais(component, dai)
if (dai->id == RT5682_AIF1)
- break;
- if (!dai) {
- dev_err(rt5682->i2c_dev, "dai %d not found in component\n",
- RT5682_AIF1);
- return -ENODEV;
- }
+ return rt5682_set_bclk1_ratio(dai, factor);
- return rt5682_set_bclk1_ratio(dai, factor);
+ dev_err(rt5682->i2c_dev, "dai %d not found in component\n",
+ RT5682_AIF1);
+ return -ENODEV;
}
static const struct clk_ops rt5682_dai_clk_ops[RT5682_DAI_NUM_CLKS] = {
diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c
index 1cba8ec7cedb..b55f3ac3a267 100644
--- a/sound/soc/codecs/rt5682s.c
+++ b/sound/soc/codecs/rt5682s.c
@@ -2687,14 +2687,11 @@ static int rt5682s_bclk_set_rate(struct clk_hw *hw, unsigned long rate,
for_each_component_dais(component, dai)
if (dai->id == RT5682S_AIF1)
- break;
- if (!dai) {
- dev_err(component->dev, "dai %d not found in component\n",
- RT5682S_AIF1);
- return -ENODEV;
- }
+ return rt5682s_set_bclk1_ratio(dai, factor);
- return rt5682s_set_bclk1_ratio(dai, factor);
+ dev_err(component->dev, "dai %d not found in component\n",
+ RT5682S_AIF1);
+ return -ENODEV;
}
static const struct clk_ops rt5682s_dai_clk_ops[RT5682S_DAI_NUM_CLKS] = {
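The rt5682 and rt5682s hunks fix the same list-iterator misuse: after a for_each_component_dais() walk (a list_for_each_entry()-style loop) completes without a break, the cursor variable points at the head-embedded sentinel, never NULL, so the old `if (!dai)` could not fire. Acting on the match inside the loop makes the not-found path reachable. The two shapes, reduced to their essence:

	/* broken: 'dai' is never NULL after the loop completes */
	for_each_component_dais(component, dai)
		if (dai->id == RT5682_AIF1)
			break;
	if (!dai)			/* never true */
		return -ENODEV;
	return rt5682_set_bclk1_ratio(dai, factor);

	/* fixed: return on the match inside the loop */
	for_each_component_dais(component, dai)
		if (dai->id == RT5682_AIF1)
			return rt5682_set_bclk1_ratio(dai, factor);
	return -ENODEV;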
diff --git a/sound/soc/codecs/rt711.c b/sound/soc/codecs/rt711.c
index 6770825d037a..ea25fd58d43a 100644
--- a/sound/soc/codecs/rt711.c
+++ b/sound/soc/codecs/rt711.c
@@ -245,6 +245,13 @@ static void rt711_jack_detect_handler(struct work_struct *work)
if (!rt711->component->card->instantiated)
return;
+ if (pm_runtime_status_suspended(rt711->slave->dev.parent)) {
+ dev_dbg(&rt711->slave->dev,
+ "%s: parent device is pm_runtime_status_suspended, skipping jack detection\n",
+ __func__);
+ return;
+ }
+
reg = RT711_VERB_GET_PIN_SENSE | RT711_HP_OUT;
ret = regmap_read(rt711->regmap, reg, &jack_status);
if (ret < 0)
diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
index 1e75e93cf28f..6298ebe96e94 100644
--- a/sound/soc/codecs/wcd934x.c
+++ b/sound/soc/codecs/wcd934x.c
@@ -1274,29 +1274,7 @@ static int wcd934x_set_sido_input_src(struct wcd934x_codec *wcd, int sido_src)
if (sido_src == wcd->sido_input_src)
return 0;
- if (sido_src == SIDO_SOURCE_INTERNAL) {
- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
- WCD934X_ANA_BUCK_HI_ACCU_EN_MASK, 0);
- usleep_range(100, 110);
- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
- WCD934X_ANA_BUCK_HI_ACCU_PRE_ENX_MASK, 0x0);
- usleep_range(100, 110);
- regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
- WCD934X_ANA_RCO_BG_EN_MASK, 0);
- usleep_range(100, 110);
- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
- WCD934X_ANA_BUCK_PRE_EN1_MASK,
- WCD934X_ANA_BUCK_PRE_EN1_ENABLE);
- usleep_range(100, 110);
- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
- WCD934X_ANA_BUCK_PRE_EN2_MASK,
- WCD934X_ANA_BUCK_PRE_EN2_ENABLE);
- usleep_range(100, 110);
- regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
- WCD934X_ANA_BUCK_HI_ACCU_EN_MASK,
- WCD934X_ANA_BUCK_HI_ACCU_ENABLE);
- usleep_range(100, 110);
- } else if (sido_src == SIDO_SOURCE_RCO_BG) {
+ if (sido_src == SIDO_SOURCE_RCO_BG) {
regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
WCD934X_ANA_RCO_BG_EN_MASK,
WCD934X_ANA_RCO_BG_ENABLE);
@@ -1382,8 +1360,6 @@ static int wcd934x_disable_ana_bias_and_syclk(struct wcd934x_codec *wcd)
regmap_update_bits(wcd->regmap, WCD934X_CLK_SYS_MCLK_PRG,
WCD934X_EXT_CLK_BUF_EN_MASK |
WCD934X_MCLK_EN_MASK, 0x0);
- wcd934x_set_sido_input_src(wcd, SIDO_SOURCE_INTERNAL);
-
regmap_update_bits(wcd->regmap, WCD934X_ANA_BIAS,
WCD934X_ANA_BIAS_EN_MASK, 0);
regmap_update_bits(wcd->regmap, WCD934X_ANA_BIAS,
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 5d4949c2ec9b..b14c6d104e6d 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -602,7 +602,7 @@ static int wm8731_hw_init(struct device *dev, struct wm8731_priv *wm8731)
ret = wm8731_reset(wm8731->regmap);
if (ret < 0) {
dev_err(dev, "Failed to issue reset: %d\n", ret);
- goto err_regulator_enable;
+ goto err;
}
/* Clear POWEROFF, keep everything else disabled */
@@ -619,10 +619,7 @@ static int wm8731_hw_init(struct device *dev, struct wm8731_priv *wm8731)
regcache_mark_dirty(wm8731->regmap);
-err_regulator_enable:
- /* Regulators will be enabled by bias management */
- regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
-
+err:
return ret;
}
@@ -760,21 +757,27 @@ static int wm8731_i2c_probe(struct i2c_client *i2c,
ret = PTR_ERR(wm8731->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
ret);
- return ret;
+ goto err_regulator_enable;
}
ret = wm8731_hw_init(&i2c->dev, wm8731);
if (ret != 0)
- return ret;
+ goto err_regulator_enable;
ret = devm_snd_soc_register_component(&i2c->dev,
&soc_component_dev_wm8731, &wm8731_dai, 1);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);
- return ret;
+ goto err_regulator_enable;
}
return 0;
+
+err_regulator_enable:
+ /* Regulators will be enabled by bias management */
+ regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
+
+ return ret;
}
static const struct i2c_device_id wm8731_i2c_id[] = {
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 4650a6931a94..ffc24afb5a7a 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -372,7 +372,7 @@ static int fsl_sai_set_bclk(struct snd_soc_dai *dai, bool tx, u32 freq)
continue;
if (ratio == 1 && !support_1_1_ratio)
continue;
- else if (ratio & 1)
+ if ((ratio & 1) && ratio > 1)
continue;
diff = abs((long)clk_rate - ratio * freq);
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 8e037835bc58..f2157944247f 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -364,13 +364,15 @@ static int asoc_simple_set_tdm(struct snd_soc_dai *dai,
struct snd_pcm_hw_params *params)
{
int sample_bits = params_width(params);
- int slot_width = simple_dai->slot_width;
- int slot_count = simple_dai->slots;
+ int slot_width, slot_count;
int i, ret;
if (!simple_dai || !simple_dai->tdm_width_map)
return 0;
+ slot_width = simple_dai->slot_width;
+ slot_count = simple_dai->slots;
+
if (slot_width == 0)
slot_width = sample_bits;
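The reshuffle is a use-before-check fix: the old declarations read simple_dai->slot_width and ->slots in their initializers, ahead of the `if (!simple_dai)` guard, so a NULL simple_dai was dereferenced before the check could reject it. Reduced to the pattern:

	/* before: initializer dereferences ahead of the guard */
	int slot_width = simple_dai->slot_width;	/* NULL deref */
	if (!simple_dai)
		return 0;

	/* after: guard first, read second */
	int slot_width;
	if (!simple_dai)
		return 0;
	slot_width = simple_dai->slot_width;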
diff --git a/sound/soc/intel/boards/sof_es8336.c b/sound/soc/intel/boards/sof_es8336.c
index 5e0529aa4f1d..9d617831dd20 100644
--- a/sound/soc/intel/boards/sof_es8336.c
+++ b/sound/soc/intel/boards/sof_es8336.c
@@ -27,9 +27,11 @@
#define SOF_ES8336_SSP_CODEC(quirk) ((quirk) & GENMASK(3, 0))
#define SOF_ES8336_SSP_CODEC_MASK (GENMASK(3, 0))
-#define SOF_ES8336_TGL_GPIO_QUIRK BIT(4)
+#define SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK BIT(4)
#define SOF_ES8336_ENABLE_DMIC BIT(5)
#define SOF_ES8336_JD_INVERTED BIT(6)
+#define SOF_ES8336_HEADPHONE_GPIO BIT(7)
+#define SOC_ES8336_HEADSET_MIC1 BIT(8)
static unsigned long quirk;
@@ -39,7 +41,7 @@ MODULE_PARM_DESC(quirk, "Board-specific quirk override");
struct sof_es8336_private {
struct device *codec_dev;
- struct gpio_desc *gpio_pa;
+ struct gpio_desc *gpio_speakers, *gpio_headphone;
struct snd_soc_jack jack;
struct list_head hdmi_pcm_list;
bool speaker_en;
@@ -51,19 +53,32 @@ struct sof_hdmi_pcm {
int device;
};
-static const struct acpi_gpio_params pa_enable_gpio = { 0, 0, true };
-static const struct acpi_gpio_mapping acpi_es8336_gpios[] = {
- { "pa-enable-gpios", &pa_enable_gpio, 1 },
+static const struct acpi_gpio_params enable_gpio0 = { 0, 0, true };
+static const struct acpi_gpio_params enable_gpio1 = { 1, 0, true };
+
+static const struct acpi_gpio_mapping acpi_speakers_enable_gpio0[] = {
+ { "speakers-enable-gpios", &enable_gpio0, 1 },
{ }
};
-static const struct acpi_gpio_params quirk_pa_enable_gpio = { 1, 0, true };
-static const struct acpi_gpio_mapping quirk_acpi_es8336_gpios[] = {
- { "pa-enable-gpios", &quirk_pa_enable_gpio, 1 },
+static const struct acpi_gpio_mapping acpi_speakers_enable_gpio1[] = {
+ { "speakers-enable-gpios", &enable_gpio1, 1 },
+};
+
+static const struct acpi_gpio_mapping acpi_enable_both_gpios[] = {
+ { "speakers-enable-gpios", &enable_gpio0, 1 },
+ { "headphone-enable-gpios", &enable_gpio1, 1 },
{ }
};
-static const struct acpi_gpio_mapping *gpio_mapping = acpi_es8336_gpios;
+static const struct acpi_gpio_mapping acpi_enable_both_gpios_rev_order[] = {
+ { "speakers-enable-gpios", &enable_gpio1, 1 },
+ { "headphone-enable-gpios", &enable_gpio0, 1 },
+ { }
+};
+
+static const struct acpi_gpio_mapping *gpio_mapping = acpi_speakers_enable_gpio0;
static void log_quirks(struct device *dev)
{
@@ -71,10 +85,14 @@ static void log_quirks(struct device *dev)
dev_info(dev, "quirk SSP%ld\n", SOF_ES8336_SSP_CODEC(quirk));
if (quirk & SOF_ES8336_ENABLE_DMIC)
dev_info(dev, "quirk DMIC enabled\n");
- if (quirk & SOF_ES8336_TGL_GPIO_QUIRK)
- dev_info(dev, "quirk TGL GPIO enabled\n");
+ if (quirk & SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK)
+ dev_info(dev, "Speakers GPIO1 quirk enabled\n");
+ if (quirk & SOF_ES8336_HEADPHONE_GPIO)
+ dev_info(dev, "quirk headphone GPIO enabled\n");
if (quirk & SOF_ES8336_JD_INVERTED)
dev_info(dev, "quirk JD inverted enabled\n");
+ if (quirk & SOC_ES8336_HEADSET_MIC1)
+ dev_info(dev, "quirk headset at mic1 port enabled\n");
}
static int sof_es8316_speaker_power_event(struct snd_soc_dapm_widget *w,
@@ -83,12 +101,23 @@ static int sof_es8316_speaker_power_event(struct snd_soc_dapm_widget *w,
struct snd_soc_card *card = w->dapm->card;
struct sof_es8336_private *priv = snd_soc_card_get_drvdata(card);
+ if (priv->speaker_en == !SND_SOC_DAPM_EVENT_ON(event))
+ return 0;
+
+ priv->speaker_en = !SND_SOC_DAPM_EVENT_ON(event);
+
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ msleep(70);
+
+ gpiod_set_value_cansleep(priv->gpio_speakers, priv->speaker_en);
+
+ if (!(quirk & SOF_ES8336_HEADPHONE_GPIO))
+ return 0;
+
if (SND_SOC_DAPM_EVENT_ON(event))
- priv->speaker_en = false;
- else
- priv->speaker_en = true;
+ msleep(70);
- gpiod_set_value_cansleep(priv->gpio_pa, priv->speaker_en);
+ gpiod_set_value_cansleep(priv->gpio_headphone, priv->speaker_en);
return 0;
}
@@ -114,18 +143,23 @@ static const struct snd_soc_dapm_route sof_es8316_audio_map[] = {
/*
* There is no separate speaker output instead the speakers are muxed to
- * the HP outputs. The mux is controlled by the "Speaker Power" supply.
+ * the HP outputs. The mux is controlled by the Speaker and/or headphone switch.
*/
{"Speaker", NULL, "HPOL"},
{"Speaker", NULL, "HPOR"},
{"Speaker", NULL, "Speaker Power"},
};
-static const struct snd_soc_dapm_route sof_es8316_intmic_in1_map[] = {
+static const struct snd_soc_dapm_route sof_es8316_headset_mic2_map[] = {
{"MIC1", NULL, "Internal Mic"},
{"MIC2", NULL, "Headset Mic"},
};
+static const struct snd_soc_dapm_route sof_es8316_headset_mic1_map[] = {
+ {"MIC2", NULL, "Internal Mic"},
+ {"MIC1", NULL, "Headset Mic"},
+};
+
static const struct snd_soc_dapm_route dmic_map[] = {
/* digital mics */
{"DMic", NULL, "SoC DMIC"},
@@ -199,8 +233,13 @@ static int sof_es8316_init(struct snd_soc_pcm_runtime *runtime)
card->dapm.idle_bias_off = true;
- custom_map = sof_es8316_intmic_in1_map;
- num_routes = ARRAY_SIZE(sof_es8316_intmic_in1_map);
+ if (quirk & SOC_ES8336_HEADSET_MIC1) {
+ custom_map = sof_es8316_headset_mic1_map;
+ num_routes = ARRAY_SIZE(sof_es8316_headset_mic1_map);
+ } else {
+ custom_map = sof_es8316_headset_mic2_map;
+ num_routes = ARRAY_SIZE(sof_es8316_headset_mic2_map);
+ }
ret = snd_soc_dapm_add_routes(&card->dapm, custom_map, num_routes);
if (ret)
@@ -233,8 +272,14 @@ static int sof_es8336_quirk_cb(const struct dmi_system_id *id)
{
quirk = (unsigned long)id->driver_data;
- if (quirk & SOF_ES8336_TGL_GPIO_QUIRK)
- gpio_mapping = quirk_acpi_es8336_gpios;
+ if (quirk & SOF_ES8336_HEADPHONE_GPIO) {
+ if (quirk & SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK)
+ gpio_mapping = acpi_enable_both_gpios;
+ else
+ gpio_mapping = acpi_enable_both_gpios_rev_order;
+ } else if (quirk & SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK) {
+ gpio_mapping = acpi_speakers_enable_gpio1;
+ }
return 1;
}
@@ -257,7 +302,16 @@ static const struct dmi_system_id sof_es8336_quirk_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "IP3 tech"),
DMI_MATCH(DMI_BOARD_NAME, "WN1"),
},
- .driver_data = (void *)(SOF_ES8336_TGL_GPIO_QUIRK)
+ .driver_data = (void *)(SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK)
+ },
+ {
+ .callback = sof_es8336_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HUAWEI"),
+ DMI_MATCH(DMI_BOARD_NAME, "BOHB-WAX9-PCB-B2"),
+ },
+ .driver_data = (void *)(SOF_ES8336_HEADPHONE_GPIO |
+ SOC_ES8336_HEADSET_MIC1)
},
{}
};
@@ -585,10 +639,17 @@ static int sof_es8336_probe(struct platform_device *pdev)
if (ret)
dev_warn(codec_dev, "unable to add GPIO mapping table\n");
- priv->gpio_pa = gpiod_get_optional(codec_dev, "pa-enable", GPIOD_OUT_LOW);
- if (IS_ERR(priv->gpio_pa)) {
- ret = dev_err_probe(dev, PTR_ERR(priv->gpio_pa),
- "could not get pa-enable GPIO\n");
+ priv->gpio_speakers = gpiod_get_optional(codec_dev, "speakers-enable", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->gpio_speakers)) {
+ ret = dev_err_probe(dev, PTR_ERR(priv->gpio_speakers),
+ "could not get speakers-enable GPIO\n");
+ goto err_put_codec;
+ }
+
+ priv->gpio_headphone = gpiod_get_optional(codec_dev, "headphone-enable", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->gpio_headphone)) {
+ ret = dev_err_probe(dev, PTR_ERR(priv->gpio_headphone),
+ "could not get headphone-enable GPIO\n");
goto err_put_codec;
}
@@ -604,7 +665,7 @@ static int sof_es8336_probe(struct platform_device *pdev)
ret = devm_snd_soc_register_card(dev, card);
if (ret) {
- gpiod_put(priv->gpio_pa);
+ gpiod_put(priv->gpio_speakers);
dev_err(dev, "snd_soc_register_card failed: %d\n", ret);
goto err_put_codec;
}
@@ -622,7 +683,7 @@ static int sof_es8336_remove(struct platform_device *pdev)
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct sof_es8336_private *priv = snd_soc_card_get_drvdata(card);
- gpiod_put(priv->gpio_pa);
+ gpiod_put(priv->gpio_speakers);
device_remove_software_node(priv->codec_dev);
put_device(priv->codec_dev);
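One structural detail in the GPIO tables above: the ACPI GPIO lookup walks an acpi_gpio_mapping array until it reaches an entry with a NULL name, so every table must end with an empty { } sentinel. A well-formed table, with a hypothetical my_gpios name:

static const struct acpi_gpio_params enable_gpio1 = { 1, 0, true };

static const struct acpi_gpio_mapping my_gpios[] = {
	{ "speakers-enable-gpios", &enable_gpio1, 1 },
	{ }	/* sentinel: terminates the table walk */
};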
diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
index ebec4d15edaa..7126fcb63d90 100644
--- a/sound/soc/intel/boards/sof_rt5682.c
+++ b/sound/soc/intel/boards/sof_rt5682.c
@@ -212,6 +212,19 @@ static const struct dmi_system_id sof_rt5682_quirk_table[] = {
SOF_SSP_BT_OFFLOAD_PRESENT),
},
+ {
+ .callback = sof_rt5682_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Brya"),
+ DMI_MATCH(DMI_OEM_STRING, "AUDIO-MAX98360_ALC5682I_I2S_AMP_SSP2"),
+ },
+ .driver_data = (void *)(SOF_RT5682_MCLK_EN |
+ SOF_RT5682_SSP_CODEC(0) |
+ SOF_SPEAKER_AMP_PRESENT |
+ SOF_MAX98360A_SPEAKER_AMP_PRESENT |
+ SOF_RT5682_SSP_AMP(2) |
+ SOF_RT5682_NUM_HDMIDEV(4)),
+ },
{}
};
diff --git a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
index 6edc9b7108cd..ef19150e7b2e 100644
--- a/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-tgl-match.c
@@ -132,13 +132,13 @@ static const struct snd_soc_acpi_adr_device mx8373_1_adr[] = {
{
.adr = 0x000123019F837300ull,
.num_endpoints = 1,
- .endpoints = &spk_l_endpoint,
+ .endpoints = &spk_r_endpoint,
.name_prefix = "Right"
},
{
.adr = 0x000127019F837300ull,
.num_endpoints = 1,
- .endpoints = &spk_r_endpoint,
+ .endpoints = &spk_l_endpoint,
.name_prefix = "Left"
}
};
diff --git a/sound/soc/meson/aiu-acodec-ctrl.c b/sound/soc/meson/aiu-acodec-ctrl.c
index 27a6d3259c50..22e181646bc3 100644
--- a/sound/soc/meson/aiu-acodec-ctrl.c
+++ b/sound/soc/meson/aiu-acodec-ctrl.c
@@ -193,6 +193,9 @@ static const struct snd_soc_component_driver aiu_acodec_ctrl_component = {
.of_xlate_dai_name = aiu_acodec_of_xlate_dai_name,
.endianness = 1,
.non_legacy_dai_naming = 1,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_prefix = "acodec",
+#endif
};
int aiu_acodec_ctrl_register_component(struct device *dev)
diff --git a/sound/soc/meson/aiu-codec-ctrl.c b/sound/soc/meson/aiu-codec-ctrl.c
index c3ea733fce91..59ee66fc2bcd 100644
--- a/sound/soc/meson/aiu-codec-ctrl.c
+++ b/sound/soc/meson/aiu-codec-ctrl.c
@@ -140,6 +140,9 @@ static const struct snd_soc_component_driver aiu_hdmi_ctrl_component = {
.of_xlate_dai_name = aiu_hdmi_of_xlate_dai_name,
.endianness = 1,
.non_legacy_dai_naming = 1,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_prefix = "hdmi",
+#endif
};
int aiu_hdmi_ctrl_register_component(struct device *dev)
diff --git a/sound/soc/meson/aiu.c b/sound/soc/meson/aiu.c
index d299a70db7e5..88e611e64d14 100644
--- a/sound/soc/meson/aiu.c
+++ b/sound/soc/meson/aiu.c
@@ -103,6 +103,9 @@ static const struct snd_soc_component_driver aiu_cpu_component = {
.pointer = aiu_fifo_pointer,
.probe = aiu_cpu_component_probe,
.remove = aiu_cpu_component_remove,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_prefix = "cpu",
+#endif
};
static struct snd_soc_dai_driver aiu_cpu_dai_drv[] = {
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index ce153ac2c3ab..8c7da82a62ca 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2587,6 +2587,11 @@ int snd_soc_component_initialize(struct snd_soc_component *component,
component->dev = dev;
component->driver = driver;
+#ifdef CONFIG_DEBUG_FS
+ if (!component->debugfs_prefix)
+ component->debugfs_prefix = driver->debugfs_prefix;
+#endif
+
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_component_initialize);
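The meson/aiu hunks above and this soc-core change work as a pair: the aiu driver registers several components on one device, and without distinct debugfs directory names they collide. The component driver can now carry a debugfs_prefix, which snd_soc_component_initialize() copies over whenever the component itself has none set. A sketch of the driver side, with hypothetical my_* names:

static const struct snd_soc_component_driver my_ctrl_component = {
	.name			= "my-ctrl",
#ifdef CONFIG_DEBUG_FS
	.debugfs_prefix		= "hdmi",	/* distinct debugfs dir per component */
#endif
};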
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index b435b5c4cfb7..ca917a849c42 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1687,8 +1687,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
switch (w->id) {
case snd_soc_dapm_pre:
if (!w->event)
- list_for_each_entry_safe_continue(w, n, list,
- power_list);
+ continue;
if (event == SND_SOC_DAPM_STREAM_START)
ret = w->event(w,
@@ -1700,8 +1699,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
case snd_soc_dapm_post:
if (!w->event)
- list_for_each_entry_safe_continue(w, n, list,
- power_list);
+ continue;
if (event == SND_SOC_DAPM_STREAM_START)
ret = w->event(w,
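The removed calls misused list_for_each_entry_safe_continue(): it is a for-loop header, not a skip-this-entry statement, so written bare it adopted the next statement as its loop body and re-walked the list from w. Since dapm_seq_run() is already inside a list_for_each_entry_safe() walk over power_list, a plain continue expresses the intent (continue binds to the enclosing loop even from inside the switch):

	list_for_each_entry_safe(w, n, list, power_list) {
		switch (w->id) {
		case snd_soc_dapm_pre:
			if (!w->event)
				continue;	/* next widget, no handler here */
			/* ... dispatch w->event() ... */
			break;
		default:
			break;
		}
	}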
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 9a954680d492..11c9853e9e80 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1214,7 +1214,7 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
be_substream->pcm->nonatomic = 1;
}
- dpcm = kzalloc(sizeof(struct snd_soc_dpcm), GFP_ATOMIC);
+ dpcm = kzalloc(sizeof(struct snd_soc_dpcm), GFP_KERNEL);
if (!dpcm)
return -ENOMEM;
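GFP_ATOMIC was left over from when this path allocated under a spinlock; after the DPCM locking rework it runs in process context holding only mutexes, so it may sleep. GFP_KERNEL can block and reclaim memory, whereas GFP_ATOMIC draws on emergency reserves and fails more readily. The rule of thumb:

	/* process context, only mutexes held: may sleep */
	dpcm = kzalloc(sizeof(*dpcm), GFP_KERNEL);

	/* IRQ handler or spinlock held: must not sleep */
	dpcm = kzalloc(sizeof(*dpcm), GFP_ATOMIC);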
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 72e50df7052c..3bb90a819650 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1436,12 +1436,12 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
template.num_kcontrols = le32_to_cpu(w->num_kcontrols);
kc = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(*kc), GFP_KERNEL);
if (!kc)
- goto err;
+ goto hdr_err;
kcontrol_type = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(unsigned int),
GFP_KERNEL);
if (!kcontrol_type)
- goto err;
+ goto hdr_err;
for (i = 0; i < le32_to_cpu(w->num_kcontrols); i++) {
control_hdr = (struct snd_soc_tplg_ctl_hdr *)tplg->pos;
diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
index 4c9596742844..12f5cff22448 100644
--- a/sound/soc/sof/sof-pci-dev.c
+++ b/sound/soc/sof/sof-pci-dev.c
@@ -83,7 +83,14 @@ static const struct dmi_system_id sof_tplg_table[] = {
},
.driver_data = "sof-adl-max98357a-rt5682-2way.tplg",
},
-
+ {
+ .callback = sof_tplg_cb,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Brya"),
+ DMI_MATCH(DMI_OEM_STRING, "AUDIO-MAX98360_ALC5682I_I2S_AMP_SSP2"),
+ },
+ .driver_data = "sof-adl-max98357a-rt5682.tplg",
+ },
{}
};
diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c
index 9b11e9795a7a..3e5b319b44c7 100644
--- a/sound/soc/sof/topology.c
+++ b/sound/soc/sof/topology.c
@@ -904,8 +904,10 @@ static int sof_control_load(struct snd_soc_component *scomp, int index,
return -ENOMEM;
scontrol->name = kstrdup(hdr->name, GFP_KERNEL);
- if (!scontrol->name)
+ if (!scontrol->name) {
+ kfree(scontrol);
return -ENOMEM;
+ }
scontrol->scomp = scomp;
scontrol->access = kc->access;
@@ -941,11 +943,13 @@ static int sof_control_load(struct snd_soc_component *scomp, int index,
default:
dev_warn(scomp->dev, "control type not supported %d:%d:%d\n",
hdr->ops.get, hdr->ops.put, hdr->ops.info);
+ kfree(scontrol->name);
kfree(scontrol);
return 0;
}
if (ret < 0) {
+ kfree(scontrol->name);
kfree(scontrol);
return ret;
}
@@ -1068,6 +1072,46 @@ static int sof_connect_dai_widget(struct snd_soc_component *scomp,
return 0;
}
+static void sof_disconnect_dai_widget(struct snd_soc_component *scomp,
+ struct snd_soc_dapm_widget *w)
+{
+ struct snd_soc_card *card = scomp->card;
+ struct snd_soc_pcm_runtime *rtd;
+ struct snd_soc_dai *cpu_dai;
+ int i;
+
+ if (!w->sname)
+ return;
+
+ list_for_each_entry(rtd, &card->rtd_list, list) {
+ /* does stream match DAI link ? */
+ if (!rtd->dai_link->stream_name ||
+ strcmp(w->sname, rtd->dai_link->stream_name))
+ continue;
+
+ switch (w->id) {
+ case snd_soc_dapm_dai_out:
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+ if (cpu_dai->capture_widget == w) {
+ cpu_dai->capture_widget = NULL;
+ break;
+ }
+ }
+ break;
+ case snd_soc_dapm_dai_in:
+ for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+ if (cpu_dai->playback_widget == w) {
+ cpu_dai->playback_widget = NULL;
+ break;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
+
/* bind PCM ID to host component ID */
static int spcm_bind(struct snd_soc_component *scomp, struct snd_sof_pcm *spcm,
int dir)
@@ -1353,6 +1397,9 @@ static int sof_widget_unload(struct snd_soc_component *scomp,
if (dai)
list_del(&dai->list);
+
+ sof_disconnect_dai_widget(scomp, widget);
+
break;
default:
break;
@@ -1380,6 +1427,7 @@ static int sof_widget_unload(struct snd_soc_component *scomp,
}
kfree(scontrol->ipc_control_data);
list_del(&scontrol->list);
+ kfree(scontrol->name);
kfree(scontrol);
}
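The three kfree(scontrol->name) additions follow one ownership rule: the name is kstrdup()'d at control-load time, so every path that discards scontrol — the unsupported-control warning, the setup failure, and control unload — must free the string along with the struct. The pairing, condensed:

	scontrol->name = kstrdup(hdr->name, GFP_KERNEL);
	if (!scontrol->name) {
		kfree(scontrol);
		return -ENOMEM;
	}

	/* ... on any later bail-out, both allocations are owned here ... */
	kfree(scontrol->name);
	kfree(scontrol);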
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 2c01649c70f6..7c6ca2b433a5 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1194,6 +1194,7 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
} while (drain_urbs && timeout);
finish_wait(&ep->drain_wait, &wait);
}
+ port->active = 0;
spin_unlock_irq(&ep->buffer_lock);
}
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 64f5544d0a0a..7ef7a8abcc2b 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -599,6 +599,10 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
.id = USB_ID(0x0db0, 0x419c),
.map = msi_mpg_x570s_carbon_max_wifi_alc4080_map,
},
+ { /* MSI MAG X570S Torpedo Max */
+ .id = USB_ID(0x0db0, 0xa073),
+ .map = msi_mpg_x570s_carbon_max_wifi_alc4080_map,
+ },
{ /* MSI TRX40 */
.id = USB_ID(0x0db0, 0x543d),
.map = trx40_mobo_map,
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index cec6e91afea2..6d699065e81a 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -669,9 +669,9 @@ static const struct snd_pcm_hardware snd_usb_hardware =
SNDRV_PCM_INFO_PAUSE,
.channels_min = 1,
.channels_max = 256,
- .buffer_bytes_max = 1024 * 1024,
+ .buffer_bytes_max = INT_MAX, /* limited by BUFFER_TIME later */
.period_bytes_min = 64,
- .period_bytes_max = 512 * 1024,
+ .period_bytes_max = INT_MAX, /* limited by PERIOD_TIME later */
.periods_min = 2,
.periods_max = 1024,
};
@@ -1064,6 +1064,18 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
return err;
}
+ /* set max period and buffer sizes for 1 and 2 seconds, respectively */
+ err = snd_pcm_hw_constraint_minmax(runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_TIME,
+ 0, 1000000);
+ if (err < 0)
+ return err;
+ err = snd_pcm_hw_constraint_minmax(runtime,
+ SNDRV_PCM_HW_PARAM_BUFFER_TIME,
+ 0, 2000000);
+ if (err < 0)
+ return err;
+
/* additional hw constraints for implicit fb */
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
hw_rule_format_implicit_fb, subs,
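For scale: a 192 kHz, 8-channel, 32-bit stream moves 192000 × 8 × 4 = 6,144,000 bytes per second, so the old 1 MiB buffer_bytes_max held only about 0.17 s of audio, while ordinary 48 kHz 16-bit stereo needs just 48000 × 2 × 2 × 2 = 384,000 bytes for two full seconds. Capping PERIOD_TIME at 1 s and BUFFER_TIME at 2 s therefore scales the byte limit with the format instead of imposing one fixed ceiling on all of them.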
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 167834133b9b..b8359a0aa008 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -8,7 +8,7 @@
*/
/* handling of USB vendor/product ID pairs as 32-bit numbers */
-#define USB_ID(vendor, product) (((vendor) << 16) | (product))
+#define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))
#define USB_ID_VENDOR(id) ((id) >> 16)
#define USB_ID_PRODUCT(id) ((u16)(id))
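The cast matters for vendor IDs with the top bit set: the macro arguments are plain ints, and a shift such as 0x8086 << 16 pushes a 1 into the sign bit of a 32-bit int, which is undefined behaviour in C. Promoting to unsigned first keeps the shift well-defined:

	/* as a signed int, 0x8086 << 16 overflows into the sign bit (UB);
	 * as unsigned it is simply 0x80860000 */
	u32 id = USB_ID(0x8086, 0x0001);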
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index b00634663346..0d828e35b401 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -1652,7 +1652,7 @@ static void hdmi_lpe_audio_free(struct snd_card *card)
* This function is called when the i915 driver creates the
* hdmi-lpe-audio platform device.
*/
-static int hdmi_lpe_audio_probe(struct platform_device *pdev)
+static int __hdmi_lpe_audio_probe(struct platform_device *pdev)
{
struct snd_card *card;
struct snd_intelhad_card *card_ctx;
@@ -1815,6 +1815,11 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
return 0;
}
+static int hdmi_lpe_audio_probe(struct platform_device *pdev)
+{
+ return snd_card_free_on_error(&pdev->dev, __hdmi_lpe_audio_probe(pdev));
+}
+
static const struct dev_pm_ops hdmi_lpe_audio_pm = {
SET_SYSTEM_SLEEP_PM_OPS(hdmi_lpe_audio_suspend, hdmi_lpe_audio_resume)
};
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 0eb90d21049e..ee15311b6be1 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -128,9 +128,9 @@
#define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */
#define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */
-/* SRBDS support */
#define MSR_IA32_MCU_OPT_CTRL 0x00000123
-#define RNGDS_MITG_DIS BIT(0)
+#define RNGDS_MITG_DIS BIT(0) /* SRBDS support */
+#define RTM_ALLOW BIT(1) /* TSX development mode */
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h
index f41d8a0eb1a4..0616409513eb 100644
--- a/tools/include/linux/slab.h
+++ b/tools/include/linux/slab.h
@@ -28,7 +28,13 @@ static inline void *kzalloc(size_t size, gfp_t gfp)
return kmalloc(size, gfp | __GFP_ZERO);
}
-void *kmem_cache_alloc(struct kmem_cache *cachep, int flags);
+struct list_lru;
+
+void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *, int flags);
+static inline void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
+{
+ return kmem_cache_alloc_lru(cachep, NULL, flags);
+}
void kmem_cache_free(struct kmem_cache *cachep, void *objp);
struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
index 1b15ba13c477..a09315538a30 100644
--- a/tools/lib/perf/evlist.c
+++ b/tools/lib/perf/evlist.c
@@ -577,7 +577,6 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
{
struct perf_evsel *evsel;
const struct perf_cpu_map *cpus = evlist->user_requested_cpus;
- const struct perf_thread_map *threads = evlist->threads;
if (!ops || !ops->get || !ops->mmap)
return -EINVAL;
@@ -589,7 +588,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
perf_evlist__for_each_entry(evlist, evsel) {
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
evsel->sample_id == NULL &&
- perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
+ perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
return -ENOMEM;
}
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index bd0c2c828940..ca5b74603008 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -184,6 +184,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
"do_group_exit",
"stop_this_cpu",
"__invalid_creds",
+ "cpu_startup_entry",
};
if (!func)
@@ -559,12 +560,12 @@ static int add_dead_ends(struct objtool_file *file)
else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
insn = find_last_insn(file, reloc->sym->sec);
if (!insn) {
- WARN("can't find unreachable insn at %s+0x%x",
+ WARN("can't find unreachable insn at %s+0x%lx",
reloc->sym->sec->name, reloc->addend);
return -1;
}
} else {
- WARN("can't find unreachable insn at %s+0x%x",
+ WARN("can't find unreachable insn at %s+0x%lx",
reloc->sym->sec->name, reloc->addend);
return -1;
}
@@ -594,12 +595,12 @@ reachable:
else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
insn = find_last_insn(file, reloc->sym->sec);
if (!insn) {
- WARN("can't find reachable insn at %s+0x%x",
+ WARN("can't find reachable insn at %s+0x%lx",
reloc->sym->sec->name, reloc->addend);
return -1;
}
} else {
- WARN("can't find reachable insn at %s+0x%x",
+ WARN("can't find reachable insn at %s+0x%lx",
reloc->sym->sec->name, reloc->addend);
return -1;
}
@@ -1271,12 +1272,19 @@ static bool is_first_func_insn(struct objtool_file *file, struct instruction *in
*/
static int add_jump_destinations(struct objtool_file *file)
{
- struct instruction *insn;
+ struct instruction *insn, *jump_dest;
struct reloc *reloc;
struct section *dest_sec;
unsigned long dest_off;
for_each_insn(file, insn) {
+ if (insn->jump_dest) {
+ /*
+ * handle_group_alt() may have previously set
+ * 'jump_dest' for some alternatives.
+ */
+ continue;
+ }
if (!is_static_jump(insn))
continue;
@@ -1291,7 +1299,10 @@ static int add_jump_destinations(struct objtool_file *file)
add_retpoline_call(file, insn);
continue;
} else if (insn->func) {
- /* internal or external sibling call (with reloc) */
+ /*
+ * External sibling call or internal sibling call with
+ * STT_FUNC reloc.
+ */
add_call_dest(file, insn, reloc->sym, true);
continue;
} else if (reloc->sym->sec->idx) {
@@ -1303,17 +1314,8 @@ static int add_jump_destinations(struct objtool_file *file)
continue;
}
- insn->jump_dest = find_insn(file, dest_sec, dest_off);
- if (!insn->jump_dest) {
-
- /*
- * This is a special case where an alt instruction
- * jumps past the end of the section. These are
- * handled later in handle_group_alt().
- */
- if (!strcmp(insn->sec->name, ".altinstr_replacement"))
- continue;
-
+ jump_dest = find_insn(file, dest_sec, dest_off);
+ if (!jump_dest) {
WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
insn->sec, insn->offset, dest_sec->name,
dest_off);
@@ -1323,8 +1325,8 @@ static int add_jump_destinations(struct objtool_file *file)
/*
* Cross-function jump.
*/
- if (insn->func && insn->jump_dest->func &&
- insn->func != insn->jump_dest->func) {
+ if (insn->func && jump_dest->func &&
+ insn->func != jump_dest->func) {
/*
* For GCC 8+, create parent/child links for any cold
@@ -1342,16 +1344,22 @@ static int add_jump_destinations(struct objtool_file *file)
* subfunction is through a jump table.
*/
if (!strstr(insn->func->name, ".cold") &&
- strstr(insn->jump_dest->func->name, ".cold")) {
- insn->func->cfunc = insn->jump_dest->func;
- insn->jump_dest->func->pfunc = insn->func;
+ strstr(jump_dest->func->name, ".cold")) {
+ insn->func->cfunc = jump_dest->func;
+ jump_dest->func->pfunc = insn->func;
- } else if (!same_function(insn, insn->jump_dest) &&
- is_first_func_insn(file, insn->jump_dest)) {
- /* internal sibling call (without reloc) */
- add_call_dest(file, insn, insn->jump_dest->func, true);
+ } else if (!same_function(insn, jump_dest) &&
+ is_first_func_insn(file, jump_dest)) {
+ /*
+ * Internal sibling call without reloc or with
+ * STT_SECTION reloc.
+ */
+ add_call_dest(file, insn, jump_dest->func, true);
+ continue;
}
}
+
+ insn->jump_dest = jump_dest;
}
return 0;
@@ -1540,13 +1548,13 @@ static int handle_group_alt(struct objtool_file *file,
continue;
dest_off = arch_jump_destination(insn);
- if (dest_off == special_alt->new_off + special_alt->new_len)
+ if (dest_off == special_alt->new_off + special_alt->new_len) {
insn->jump_dest = next_insn_same_sec(file, last_orig_insn);
-
- if (!insn->jump_dest) {
- WARN_FUNC("can't find alternative jump destination",
- insn->sec, insn->offset);
- return -1;
+ if (!insn->jump_dest) {
+ WARN_FUNC("can't find alternative jump destination",
+ insn->sec, insn->offset);
+ return -1;
+ }
}
}
@@ -2245,14 +2253,14 @@ static int decode_sections(struct objtool_file *file)
return ret;
/*
- * Must be before add_special_section_alts() as that depends on
- * jump_dest being set.
+ * Must be before add_jump_destinations(), which depends on 'func'
+ * being set for alternatives, to enable proper sibling call detection.
*/
- ret = add_jump_destinations(file);
+ ret = add_special_section_alts(file);
if (ret)
return ret;
- ret = add_special_section_alts(file);
+ ret = add_jump_destinations(file);
if (ret)
return ret;
@@ -3210,9 +3218,8 @@ validate_ibt_reloc(struct objtool_file *file, struct reloc *reloc)
static void warn_noendbr(const char *msg, struct section *sec, unsigned long offset,
struct instruction *dest)
{
- WARN_FUNC("%srelocation to !ENDBR: %s+0x%lx", sec, offset, msg,
- dest->func ? dest->func->name : dest->sec->name,
- dest->func ? dest->offset - dest->func->offset : dest->offset);
+ WARN_FUNC("%srelocation to !ENDBR: %s", sec, offset, msg,
+ offstr(dest->sec, dest->offset));
}
static void validate_ibt_dest(struct objtool_file *file, struct instruction *insn,
@@ -3303,7 +3310,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
while (1) {
next_insn = next_insn_to_validate(file, insn);
- if (file->c_file && func && insn->func && func != insn->func->pfunc) {
+ if (func && insn->func && func != insn->func->pfunc) {
WARN("%s() falls through to next function %s()",
func->name, insn->func->name);
return 1;
@@ -3816,11 +3823,8 @@ static int validate_ibt(struct objtool_file *file)
struct instruction *dest;
dest = validate_ibt_reloc(file, reloc);
- if (is_data && dest && !dest->noendbr) {
- warn_noendbr("data ", reloc->sym->sec,
- reloc->sym->offset + reloc->addend,
- dest);
- }
+ if (is_data && dest && !dest->noendbr)
+ warn_noendbr("data ", sec, reloc->offset, dest);
}
}
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index d7b99a737496..ebf2ba5755c1 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -546,7 +546,7 @@ static struct section *elf_create_reloc_section(struct elf *elf,
int reltype);
int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
- unsigned int type, struct symbol *sym, int addend)
+ unsigned int type, struct symbol *sym, long addend)
{
struct reloc *reloc;
@@ -575,37 +575,180 @@ int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
return 0;
}
-int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
- unsigned long offset, unsigned int type,
- struct section *insn_sec, unsigned long insn_off)
+/*
+ * Ensure that any reloc section containing references to @sym is marked
+ * changed such that it will get re-generated in elf_rebuild_reloc_sections()
+ * with the new symbol index.
+ */
+static void elf_dirty_reloc_sym(struct elf *elf, struct symbol *sym)
+{
+ struct section *sec;
+
+ list_for_each_entry(sec, &elf->sections, list) {
+ struct reloc *reloc;
+
+ if (sec->changed)
+ continue;
+
+ list_for_each_entry(reloc, &sec->reloc_list, list) {
+ if (reloc->sym == sym) {
+ sec->changed = true;
+ break;
+ }
+ }
+ }
+}
+
+/*
+ * Move the first global symbol, as per sh_info, into a new, higher symbol
+ * index. This frees up the shndx for a new local symbol.
+ */
+static int elf_move_global_symbol(struct elf *elf, struct section *symtab,
+ struct section *symtab_shndx)
{
+ Elf_Data *data, *shndx_data = NULL;
+ Elf32_Word first_non_local;
struct symbol *sym;
- int addend;
+ Elf_Scn *s;
- if (insn_sec->sym) {
- sym = insn_sec->sym;
- addend = insn_off;
+ first_non_local = symtab->sh.sh_info;
- } else {
- /*
- * The Clang assembler strips section symbols, so we have to
- * reference the function symbol instead:
- */
- sym = find_symbol_containing(insn_sec, insn_off);
- if (!sym) {
- /*
- * Hack alert. This happens when we need to reference
- * the NOP pad insn immediately after the function.
- */
- sym = find_symbol_containing(insn_sec, insn_off - 1);
+ sym = find_symbol_by_index(elf, first_non_local);
+ if (!sym) {
+ WARN("no non-local symbols !?");
+ return first_non_local;
+ }
+
+ s = elf_getscn(elf->elf, symtab->idx);
+ if (!s) {
+ WARN_ELF("elf_getscn");
+ return -1;
+ }
+
+ data = elf_newdata(s);
+ if (!data) {
+ WARN_ELF("elf_newdata");
+ return -1;
+ }
+
+ data->d_buf = &sym->sym;
+ data->d_size = sizeof(sym->sym);
+ data->d_align = 1;
+ data->d_type = ELF_T_SYM;
+
+ sym->idx = symtab->sh.sh_size / sizeof(sym->sym);
+ elf_dirty_reloc_sym(elf, sym);
+
+ symtab->sh.sh_info += 1;
+ symtab->sh.sh_size += data->d_size;
+ symtab->changed = true;
+
+ if (symtab_shndx) {
+ s = elf_getscn(elf->elf, symtab_shndx->idx);
+ if (!s) {
+ WARN_ELF("elf_getscn");
+ return -1;
}
- if (!sym) {
- WARN("can't find symbol containing %s+0x%lx", insn_sec->name, insn_off);
+ shndx_data = elf_newdata(s);
+ if (!shndx_data) {
+ WARN_ELF("elf_newshndx_data");
return -1;
}
- addend = insn_off - sym->offset;
+ shndx_data->d_buf = &sym->sec->idx;
+ shndx_data->d_size = sizeof(Elf32_Word);
+ shndx_data->d_align = 4;
+ shndx_data->d_type = ELF_T_WORD;
+
+ symtab_shndx->sh.sh_size += 4;
+ symtab_shndx->changed = true;
+ }
+
+ return first_non_local;
+}
+
+static struct symbol *
+elf_create_section_symbol(struct elf *elf, struct section *sec)
+{
+ struct section *symtab, *symtab_shndx;
+ Elf_Data *shndx_data = NULL;
+ struct symbol *sym;
+ Elf32_Word shndx;
+
+ symtab = find_section_by_name(elf, ".symtab");
+ if (symtab) {
+ symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
+ if (symtab_shndx)
+ shndx_data = symtab_shndx->data;
+ } else {
+ WARN("no .symtab");
+ return NULL;
+ }
+
+ sym = malloc(sizeof(*sym));
+ if (!sym) {
+ perror("malloc");
+ return NULL;
+ }
+ memset(sym, 0, sizeof(*sym));
+
+ sym->idx = elf_move_global_symbol(elf, symtab, symtab_shndx);
+ if (sym->idx < 0) {
+ WARN("elf_move_global_symbol");
+ return NULL;
+ }
+
+ sym->name = sec->name;
+ sym->sec = sec;
+
+ // st_name 0
+ sym->sym.st_info = GELF_ST_INFO(STB_LOCAL, STT_SECTION);
+ // st_other 0
+ // st_value 0
+ // st_size 0
+ shndx = sec->idx;
+ if (shndx >= SHN_UNDEF && shndx < SHN_LORESERVE) {
+ sym->sym.st_shndx = shndx;
+ if (!shndx_data)
+ shndx = 0;
+ } else {
+ sym->sym.st_shndx = SHN_XINDEX;
+ if (!shndx_data) {
+ WARN("no .symtab_shndx");
+ return NULL;
+ }
+ }
+
+ if (!gelf_update_symshndx(symtab->data, shndx_data, sym->idx, &sym->sym, shndx)) {
+ WARN_ELF("gelf_update_symshndx");
+ return NULL;
+ }
+
+ elf_add_symbol(elf, sym);
+
+ return sym;
+}
+
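For context, a hedged note on the st_shndx encoding used above (standard ELF behaviour, not specific to this patch):

/*
 * A section index below SHN_LORESERVE (0xff00) is stored directly in the
 * 16-bit st_shndx field. Larger indices do not fit, so st_shndx is set to
 * SHN_XINDEX and the real 32-bit index is written to the parallel
 * .symtab_shndx table at the same symbol slot.
 */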
+int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
+ unsigned long offset, unsigned int type,
+ struct section *insn_sec, unsigned long insn_off)
+{
+ struct symbol *sym = insn_sec->sym;
+ int addend = insn_off;
+
+ if (!sym) {
+ /*
+ * Due to how weak functions work, we must use section based
+ * relocations. Symbol based relocations would result in the
+ * weak and non-weak function annotations being overlaid on the
+ * non-weak function after linking.
+ */
+ sym = elf_create_section_symbol(elf, insn_sec);
+ if (!sym)
+ return -1;
+
+ insn_sec->sym = sym;
}
return elf_add_reloc(elf, sec, offset, type, sym, addend);
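A hypothetical illustration (file and function names invented) of why symbol-based relocations go wrong for weak functions:

/* tu_a.c -- hypothetical */
void __attribute__((weak)) foo(void) { }	/* objtool records foo+0x0 */

/* tu_b.c -- hypothetical */
void foo(void) { }				/* objtool records foo+0x0 */

/*
 * After linking, "foo" resolves to tu_b's strong copy, so both
 * symbol-based annotations land on tu_b's function and the weak copy's
 * annotation is misapplied. A section-based reloc (tu_a's ".text" +
 * offset) keeps each annotation on the bytes it was generated for.
 */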
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
index 22ba7e2b816e..9b36802ed86f 100644
--- a/tools/objtool/include/objtool/elf.h
+++ b/tools/objtool/include/objtool/elf.h
@@ -73,7 +73,7 @@ struct reloc {
struct symbol *sym;
unsigned long offset;
unsigned int type;
- int addend;
+ long addend;
int idx;
bool jump_table_start;
};
@@ -135,7 +135,7 @@ struct elf *elf_open_read(const char *name, int flags);
struct section *elf_create_section(struct elf *elf, const char *name, unsigned int sh_flags, size_t entsize, int nr);
int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
- unsigned int type, struct symbol *sym, int addend);
+ unsigned int type, struct symbol *sym, long addend);
int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
unsigned long offset, unsigned int type,
struct section *insn_sec, unsigned long insn_off);
diff --git a/tools/objtool/include/objtool/objtool.h b/tools/objtool/include/objtool/objtool.h
index 7a5c13a78f87..a6e72d916807 100644
--- a/tools/objtool/include/objtool/objtool.h
+++ b/tools/objtool/include/objtool/objtool.h
@@ -27,7 +27,7 @@ struct objtool_file {
struct list_head static_call_list;
struct list_head mcount_loc_list;
struct list_head endbr_list;
- bool ignore_unreachables, c_file, hints, rodata;
+ bool ignore_unreachables, hints, rodata;
unsigned int nr_endbr;
unsigned int nr_endbr_int;
diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
index b09946f4e1d6..843ff3c2f28e 100644
--- a/tools/objtool/objtool.c
+++ b/tools/objtool/objtool.c
@@ -129,7 +129,6 @@ struct objtool_file *objtool_open_read(const char *_objname)
INIT_LIST_HEAD(&file.static_call_list);
INIT_LIST_HEAD(&file.mcount_loc_list);
INIT_LIST_HEAD(&file.endbr_list);
- file.c_file = !vmlinux && find_section_by_name(file.elf, ".comment");
file.ignore_unreachables = no_unreachable;
file.hints = false;
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index af4d63af8072..e8b577d33e53 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -148,6 +148,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
bool privileged = perf_event_paranoid_check(-1);
struct evsel *tracking_evsel;
int err;
+ u64 bit;
sper->evlist = evlist;
@@ -245,6 +246,15 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
*/
evsel__set_sample_bit(arm_spe_evsel, DATA_SRC);
+ /*
+	 * The PHYS_ADDR flag does not affect the driver behaviour; it is used to
+	 * indicate that the resulting output's SPE samples contain physical
+	 * addresses where applicable.

+ */
+ bit = perf_pmu__format_bits(&arm_spe_pmu->format, "pa_enable");
+ if (arm_spe_evsel->core.attr.config & bit)
+ evsel__set_sample_bit(arm_spe_evsel, PHYS_ADDR);
+
/* Add dummy event to keep tracking */
err = parse_events(evlist, "dummy:u", NULL);
if (err)
diff --git a/tools/perf/arch/arm64/util/machine.c b/tools/perf/arch/arm64/util/machine.c
index d2ce31e28cd7..41c1596e5207 100644
--- a/tools/perf/arch/arm64/util/machine.c
+++ b/tools/perf/arch/arm64/util/machine.c
@@ -8,27 +8,6 @@
#include "callchain.h"
#include "record.h"
-/* On arm64, kernel text segment starts at high memory address,
- * for example 0xffff 0000 8xxx xxxx. Modules start at a low memory
- * address, like 0xffff 0000 00ax xxxx. When only small amount of
- * memory is used by modules, gap between end of module's text segment
- * and start of kernel text segment may reach 2G.
- * Therefore do not fill this gap and do not assign it to the kernel dso map.
- */
-
-#define SYMBOL_LIMIT (1 << 12) /* 4K */
-
-void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
-{
- if ((strchr(p->name, '[') && strchr(c->name, '[') == NULL) ||
- (strchr(p->name, '[') == NULL && strchr(c->name, '[')))
- /* Limit range of last symbol in module and kernel */
- p->end += SYMBOL_LIMIT;
- else
- p->end = c->start;
- pr_debug4("%s sym:%s end:%#" PRIx64 "\n", __func__, p->name, p->end);
-}
-
void arch__add_leaf_frame_record_opts(struct record_opts *opts)
{
opts->sample_user_regs |= sample_reg_masks[PERF_REG_ARM64_LR].mask;
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build
index 8a79c4126e5b..0115f3166568 100644
--- a/tools/perf/arch/powerpc/util/Build
+++ b/tools/perf/arch/powerpc/util/Build
@@ -1,5 +1,4 @@
perf-y += header.o
-perf-y += machine.o
perf-y += kvm-stat.o
perf-y += perf_regs.o
perf-y += mem-events.o
diff --git a/tools/perf/arch/powerpc/util/machine.c b/tools/perf/arch/powerpc/util/machine.c
deleted file mode 100644
index e652a1aa8132..000000000000
--- a/tools/perf/arch/powerpc/util/machine.c
+++ /dev/null
@@ -1,25 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <inttypes.h>
-#include <stdio.h>
-#include <string.h>
-#include <internal/lib.h> // page_size
-#include "debug.h"
-#include "symbol.h"
-
-/* On powerpc kernel text segment start at memory addresses, 0xc000000000000000
- * whereas the modules are located at very high memory addresses,
- * for example 0xc00800000xxxxxxx. The gap between end of kernel text segment
- * and beginning of first module's text segment is very high.
- * Therefore do not fill this gap and do not assign it to the kernel dso map.
- */
-
-void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
-{
- if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
- /* Limit the range of last kernel symbol */
- p->end += page_size;
- else
- p->end = c->start;
- pr_debug4("%s sym:%s end:%#" PRIx64 "\n", __func__, p->name, p->end);
-}
diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
index 7644a4f6d4a4..98bc3f39d5f3 100644
--- a/tools/perf/arch/s390/util/machine.c
+++ b/tools/perf/arch/s390/util/machine.c
@@ -35,19 +35,3 @@ int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
return 0;
}
-
-/* On s390 kernel text segment start is located at very low memory addresses,
- * for example 0x10000. Modules are located at very high memory addresses,
- * for example 0x3ff xxxx xxxx. The gap between end of kernel text segment
- * and beginning of first module's text segment is very big.
- * Therefore do not fill this gap and do not assign it to the kernel dso map.
- */
-void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
-{
- if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
- /* Last kernel symbol mapped to end of page */
- p->end = roundup(p->end, page_size);
- else
- p->end = c->start;
- pr_debug4("%s sym:%s end:%#" PRIx64 "\n", __func__, p->name, p->end);
-}
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index f2640179ada9..44e1f8a44087 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -34,6 +34,7 @@
#include <linux/numa.h>
#include <linux/zalloc.h>
+#include "../util/header.h"
#include <numa.h>
#include <numaif.h>
@@ -54,7 +55,7 @@
struct thread_data {
int curr_cpu;
- cpu_set_t bind_cpumask;
+ cpu_set_t *bind_cpumask;
int bind_node;
u8 *process_data;
int process_nr;
@@ -266,71 +267,115 @@ static bool node_has_cpus(int node)
return ret;
}
-static cpu_set_t bind_to_cpu(int target_cpu)
+static cpu_set_t *bind_to_cpu(int target_cpu)
{
- cpu_set_t orig_mask, mask;
- int ret;
+ int nrcpus = numa_num_possible_cpus();
+ cpu_set_t *orig_mask, *mask;
+ size_t size;
- ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
- BUG_ON(ret);
+ orig_mask = CPU_ALLOC(nrcpus);
+ BUG_ON(!orig_mask);
+ size = CPU_ALLOC_SIZE(nrcpus);
+ CPU_ZERO_S(size, orig_mask);
+
+ if (sched_getaffinity(0, size, orig_mask))
+ goto err_out;
+
+ mask = CPU_ALLOC(nrcpus);
+ if (!mask)
+ goto err_out;
- CPU_ZERO(&mask);
+ CPU_ZERO_S(size, mask);
if (target_cpu == -1) {
int cpu;
for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
- CPU_SET(cpu, &mask);
+ CPU_SET_S(cpu, size, mask);
} else {
- BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus);
- CPU_SET(target_cpu, &mask);
+ if (target_cpu < 0 || target_cpu >= g->p.nr_cpus)
+ goto err;
+
+ CPU_SET_S(target_cpu, size, mask);
}
- ret = sched_setaffinity(0, sizeof(mask), &mask);
- BUG_ON(ret);
+ if (sched_setaffinity(0, size, mask))
+ goto err;
return orig_mask;
+
+err:
+ CPU_FREE(mask);
+err_out:
+ CPU_FREE(orig_mask);
+
+	/* BUG_ON because allocation of orig_mask/mask failed */
+ BUG_ON(-1);
}
-static cpu_set_t bind_to_node(int target_node)
+static cpu_set_t *bind_to_node(int target_node)
{
- cpu_set_t orig_mask, mask;
+ int nrcpus = numa_num_possible_cpus();
+ size_t size;
+ cpu_set_t *orig_mask, *mask;
int cpu;
- int ret;
- ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
- BUG_ON(ret);
+ orig_mask = CPU_ALLOC(nrcpus);
+ BUG_ON(!orig_mask);
+ size = CPU_ALLOC_SIZE(nrcpus);
+ CPU_ZERO_S(size, orig_mask);
- CPU_ZERO(&mask);
+ if (sched_getaffinity(0, size, orig_mask))
+ goto err_out;
+
+ mask = CPU_ALLOC(nrcpus);
+ if (!mask)
+ goto err_out;
+
+ CPU_ZERO_S(size, mask);
if (target_node == NUMA_NO_NODE) {
for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
- CPU_SET(cpu, &mask);
+ CPU_SET_S(cpu, size, mask);
} else {
struct bitmask *cpumask = numa_allocate_cpumask();
- BUG_ON(!cpumask);
+ if (!cpumask)
+ goto err;
+
if (!numa_node_to_cpus(target_node, cpumask)) {
for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
if (numa_bitmask_isbitset(cpumask, cpu))
- CPU_SET(cpu, &mask);
+ CPU_SET_S(cpu, size, mask);
}
}
numa_free_cpumask(cpumask);
}
- ret = sched_setaffinity(0, sizeof(mask), &mask);
- BUG_ON(ret);
+ if (sched_setaffinity(0, size, mask))
+ goto err;
return orig_mask;
+
+err:
+ CPU_FREE(mask);
+err_out:
+ CPU_FREE(orig_mask);
+
+	/* BUG_ON because allocation of orig_mask/mask failed */
+ BUG_ON(-1);
}
-static void bind_to_cpumask(cpu_set_t mask)
+static void bind_to_cpumask(cpu_set_t *mask)
{
int ret;
+ size_t size = CPU_ALLOC_SIZE(numa_num_possible_cpus());
- ret = sched_setaffinity(0, sizeof(mask), &mask);
- BUG_ON(ret);
+ ret = sched_setaffinity(0, size, mask);
+ if (ret) {
+ CPU_FREE(mask);
+ BUG_ON(ret);
+ }
}
static void mempol_restore(void)
@@ -376,7 +421,7 @@ do { \
static u8 *alloc_data(ssize_t bytes0, int map_flags,
int init_zero, int init_cpu0, int thp, int init_random)
{
- cpu_set_t orig_mask;
+ cpu_set_t *orig_mask = NULL;
ssize_t bytes;
u8 *buf;
int ret;
@@ -434,6 +479,7 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
/* Restore affinity: */
if (init_cpu0) {
bind_to_cpumask(orig_mask);
+ CPU_FREE(orig_mask);
mempol_restore();
}
@@ -585,10 +631,16 @@ static int parse_setup_cpu_list(void)
return -1;
}
+ if (is_cpu_online(bind_cpu_0) != 1 || is_cpu_online(bind_cpu_1) != 1) {
+ printf("\nTest not applicable, bind_cpu_0 or bind_cpu_1 is offline\n");
+ return -1;
+ }
+
BUG_ON(bind_cpu_0 < 0 || bind_cpu_1 < 0);
BUG_ON(bind_cpu_0 > bind_cpu_1);
for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
+ size_t size = CPU_ALLOC_SIZE(g->p.nr_cpus);
int i;
for (i = 0; i < mul; i++) {
@@ -608,10 +660,15 @@ static int parse_setup_cpu_list(void)
tprintf("%2d", bind_cpu);
}
- CPU_ZERO(&td->bind_cpumask);
+ td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
+ BUG_ON(!td->bind_cpumask);
+ CPU_ZERO_S(size, td->bind_cpumask);
for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
- BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus);
- CPU_SET(cpu, &td->bind_cpumask);
+ if (cpu < 0 || cpu >= g->p.nr_cpus) {
+ CPU_FREE(td->bind_cpumask);
+ BUG_ON(-1);
+ }
+ CPU_SET_S(cpu, size, td->bind_cpumask);
}
t++;
}
@@ -752,8 +809,6 @@ static int parse_nodes_opt(const struct option *opt __maybe_unused,
return parse_node_list(arg);
}
-#define BIT(x) (1ul << x)
-
static inline uint32_t lfsr_32(uint32_t lfsr)
{
const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);
@@ -1241,7 +1296,7 @@ static void *worker_thread(void *__tdata)
* by migrating to CPU#0:
*/
if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
- cpu_set_t orig_mask;
+ cpu_set_t *orig_mask;
int target_cpu;
int this_cpu;
@@ -1265,6 +1320,7 @@ static void *worker_thread(void *__tdata)
printf(" (injecting perturbalance, moved to CPU#%d)\n", target_cpu);
bind_to_cpumask(orig_mask);
+ CPU_FREE(orig_mask);
}
if (details >= 3) {
@@ -1398,21 +1454,31 @@ static void init_thread_data(void)
for (t = 0; t < g->p.nr_tasks; t++) {
struct thread_data *td = g->threads + t;
+ size_t cpuset_size = CPU_ALLOC_SIZE(g->p.nr_cpus);
int cpu;
/* Allow all nodes by default: */
td->bind_node = NUMA_NO_NODE;
/* Allow all CPUs by default: */
- CPU_ZERO(&td->bind_cpumask);
+ td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
+ BUG_ON(!td->bind_cpumask);
+ CPU_ZERO_S(cpuset_size, td->bind_cpumask);
for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
- CPU_SET(cpu, &td->bind_cpumask);
+ CPU_SET_S(cpu, cpuset_size, td->bind_cpumask);
}
}
static void deinit_thread_data(void)
{
ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
+ int t;
+
+ /* Free the bind_cpumask allocated for thread_data */
+ for (t = 0; t < g->p.nr_tasks; t++) {
+ struct thread_data *td = g->threads + t;
+ CPU_FREE(td->bind_cpumask);
+ }
free_data(g->threads, size);
}
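A minimal, self-contained sketch of the dynamic cpu_set_t pattern this conversion adopts (glibc CPU_ALLOC/CPU_*_S macros; the helper name pin_to_cpu() is invented):

#define _GNU_SOURCE
#include <sched.h>
#include <stdlib.h>

/* Pin the calling thread to a single CPU using a dynamically sized set,
 * which works for systems with more CPUs than a fixed cpu_set_t holds. */
static cpu_set_t *pin_to_cpu(int nrcpus, int cpu)
{
	size_t size = CPU_ALLOC_SIZE(nrcpus);
	cpu_set_t *mask = CPU_ALLOC(nrcpus);

	if (!mask)
		return NULL;
	CPU_ZERO_S(size, mask);
	CPU_SET_S(cpu, size, mask);
	if (sched_setaffinity(0, size, mask)) {
		CPU_FREE(mask);	/* unlike a fixed cpu_set_t, this must be freed */
		return NULL;
	}
	return mask;
}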
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index ba74fab02e62..069825c48d40 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -989,8 +989,11 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
struct mmap *overwrite_mmap = evlist->overwrite_mmap;
struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
- thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
- thread_data->mask->maps.nbits);
+ if (cpu_map__is_dummy(cpus))
+ thread_data->nr_mmaps = nr_mmaps;
+ else
+ thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
+ thread_data->mask->maps.nbits);
if (mmap) {
thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
if (!thread_data->maps)
@@ -1007,16 +1010,17 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps);
for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) {
- if (test_bit(cpus->map[m].cpu, thread_data->mask->maps.bits)) {
+ if (cpu_map__is_dummy(cpus) ||
+ test_bit(cpus->map[m].cpu, thread_data->mask->maps.bits)) {
if (thread_data->maps) {
thread_data->maps[tm] = &mmap[m];
pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n",
- thread_data, cpus->map[m].cpu, tm, m);
+ thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
}
if (thread_data->overwrite_maps) {
thread_data->overwrite_maps[tm] = &overwrite_mmap[m];
pr_debug2("thread_data[%p]: cpu%d: ow_maps[%d] -> ow_mmap[%d]\n",
- thread_data, cpus->map[m].cpu, tm, m);
+ thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
}
tm++;
}
@@ -3329,6 +3333,9 @@ static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_c
{
int c;
+ if (cpu_map__is_dummy(cpus))
+ return;
+
for (c = 0; c < cpus->nr; c++)
set_bit(cpus->map[c].cpu, mask->bits);
}
@@ -3680,6 +3687,11 @@ static int record__init_thread_masks(struct record *rec)
if (!record__threads_enabled(rec))
return record__init_thread_default_masks(rec, cpus);
+ if (cpu_map__is_dummy(cpus)) {
+ pr_err("--per-thread option is mutually exclusive to parallel streaming mode.\n");
+ return -EINVAL;
+ }
+
switch (rec->opts.threads_spec) {
case THREAD_SPEC__CPU:
ret = record__init_thread_cpu_masks(rec, cpus);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 1ad75c7ba074..afe4a5539ecc 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -353,6 +353,7 @@ static int report__setup_sample_type(struct report *rep)
struct perf_session *session = rep->session;
u64 sample_type = evlist__combined_sample_type(session->evlist);
bool is_pipe = perf_data__is_pipe(session->data);
+ struct evsel *evsel;
if (session->itrace_synth_opts->callchain ||
session->itrace_synth_opts->add_callchain ||
@@ -407,6 +408,19 @@ static int report__setup_sample_type(struct report *rep)
}
if (sort__mode == SORT_MODE__MEMORY) {
+ /*
+		 * FIXUP: prior to kernel 5.18, Arm SPE failed to set the
+		 * PERF_SAMPLE_DATA_SRC bit in the sample type. For backward
+		 * compatibility, set the bit if this is an old perf data file.
+ */
+ evlist__for_each_entry(session->evlist, evsel) {
+ if (strstr(evsel->name, "arm_spe") &&
+ !(sample_type & PERF_SAMPLE_DATA_SRC)) {
+ evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
+ sample_type |= PERF_SAMPLE_DATA_SRC;
+ }
+ }
+
if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
ui__error("Selected --mem-mode but no mem data. "
"Did you call perf record without -d?\n");
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index a2f117936188..cf5eab5431b4 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -461,7 +461,7 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
return -EINVAL;
if (PRINT_FIELD(DATA_SRC) &&
- evsel__check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC))
+ evsel__do_check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(WEIGHT) &&
diff --git a/tools/perf/tests/attr/README b/tools/perf/tests/attr/README
index 454505d343fa..eb3f7d4bb324 100644
--- a/tools/perf/tests/attr/README
+++ b/tools/perf/tests/attr/README
@@ -60,6 +60,7 @@ Following tests are defined (with perf commands):
perf record -R kill (test-record-raw)
perf record -c 2 -e arm_spe_0// -- kill (test-record-spe-period)
perf record -e arm_spe_0/period=3/ -- kill (test-record-spe-period-term)
+ perf record -e arm_spe_0/pa_enable=1/ -- kill (test-record-spe-physical-address)
perf stat -e cycles kill (test-stat-basic)
perf stat kill (test-stat-default)
perf stat -d kill (test-stat-detailed-1)
diff --git a/tools/perf/tests/attr/test-record-spe-physical-address b/tools/perf/tests/attr/test-record-spe-physical-address
new file mode 100644
index 000000000000..7ebcf5012ce3
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-spe-physical-address
@@ -0,0 +1,12 @@
+[config]
+command = record
+args = --no-bpf-event -e arm_spe_0/pa_enable=1/ -- kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event-10:base-record-spe]
+# 622727 is the decimal value of IP|TID|TIME|CPU|IDENTIFIER|DATA_SRC|PHYS_ADDR
+sample_type=622727
+
+# dummy event
+[event-1:base-record-spe]
\ No newline at end of file
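As a quick check, the 622727 constant can be reproduced from the sample-type bit positions in include/uapi/linux/perf_event.h:

# PERF_SAMPLE_IP(1<<0) + TID(1<<1) + TIME(1<<2) + CPU(1<<7)
#   + DATA_SRC(1<<15) + IDENTIFIER(1<<16) + PHYS_ADDR(1<<19)
# = 1 + 2 + 4 + 128 + 32768 + 65536 + 524288 = 622727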
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
index cc6df49a65a1..4ad0dfbc8b21 100644
--- a/tools/perf/tests/perf-time-to-tsc.c
+++ b/tools/perf/tests/perf-time-to-tsc.c
@@ -123,6 +123,10 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
evsel->core.attr.enable_on_exec = 0;
}
+ if (evlist__open(evlist) == -ENOENT) {
+ err = TEST_SKIP;
+ goto out_err;
+ }
CHECK__(evlist__open(evlist));
CHECK__(evlist__mmap(evlist, UINT_MAX));
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index d2b64e3f588b..1a80151baed9 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -1033,10 +1033,11 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
memset(&attr, 0, sizeof(struct perf_event_attr));
attr.size = sizeof(struct perf_event_attr);
attr.type = PERF_TYPE_HARDWARE;
- attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
+ attr.sample_type = evsel->core.attr.sample_type &
+ (PERF_SAMPLE_MASK | PERF_SAMPLE_PHYS_ADDR);
attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC |
- PERF_SAMPLE_WEIGHT;
+ PERF_SAMPLE_WEIGHT | PERF_SAMPLE_ADDR;
if (spe->timeless_decoding)
attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
else
diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp
index df7b18fb6b6e..1aad7d6d34aa 100644
--- a/tools/perf/util/c++/clang.cpp
+++ b/tools/perf/util/c++/clang.cpp
@@ -20,7 +20,11 @@
#include "llvm/Option/Option.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/ManagedStatic.h"
+#if CLANG_VERSION_MAJOR >= 14
+#include "llvm/MC/TargetRegistry.h"
+#else
#include "llvm/Support/TargetRegistry.h"
+#endif
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index d546ff724dbe..a27132e5a5ef 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -983,6 +983,57 @@ static int write_dir_format(struct feat_fd *ff,
return do_write(ff, &data->dir.version, sizeof(data->dir.version));
}
+/*
+ * Check whether a CPU is online
+ *
+ * Returns:
+ * 1 -> if CPU is online
+ * 0 -> if CPU is offline
+ * -1 -> error case
+ */
+int is_cpu_online(unsigned int cpu)
+{
+ char *str;
+ size_t strlen;
+ char buf[256];
+ int status = -1;
+ struct stat statbuf;
+
+ snprintf(buf, sizeof(buf),
+ "/sys/devices/system/cpu/cpu%d", cpu);
+ if (stat(buf, &statbuf) != 0)
+ return 0;
+
+ /*
+ * Check if /sys/devices/system/cpu/cpux/online file
+	 * exists. In some cases cpu0 won't have an online file since
+	 * it is generally not expected to be turned off.
+	 * In kernels without CONFIG_HOTPLUG_CPU, this
+	 * file won't exist.
+ */
+ snprintf(buf, sizeof(buf),
+ "/sys/devices/system/cpu/cpu%d/online", cpu);
+ if (stat(buf, &statbuf) != 0)
+ return 1;
+
+ /*
+	 * Read the online file using sysfs__read_str.
+	 * If the open or read fails, return -1.
+	 * If it succeeds, return the value read from
+	 * the file, which gets stored in "str".
+ */
+ snprintf(buf, sizeof(buf),
+ "devices/system/cpu/cpu%d/online", cpu);
+
+ if (sysfs__read_str(buf, &str, &strlen) < 0)
+ return status;
+
+ status = atoi(str);
+
+ free(str);
+ return status;
+}
+
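A hypothetical caller sketch mirroring the check added in bench/numa.c (check_cpu() and the include path are invented for illustration):

#include <stdio.h>
#include "header.h"	/* declares is_cpu_online() */

/* Returns 0 only when the CPU is verifiably online. */
static int check_cpu(unsigned int cpu)
{
	int ret = is_cpu_online(cpu);

	if (ret != 1) {
		fprintf(stderr, "cpu%u is offline or unknown (%d)\n", cpu, ret);
		return -1;
	}
	return 0;
}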
#ifdef HAVE_LIBBPF_SUPPORT
static int write_bpf_prog_info(struct feat_fd *ff,
struct evlist *evlist __maybe_unused)
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index c9e3265832d9..0eb4bc29a5a4 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -158,6 +158,7 @@ int do_write(struct feat_fd *fd, const void *buf, size_t size);
int write_padded(struct feat_fd *fd, const void *bf,
size_t count, size_t count_aligned);
+int is_cpu_online(unsigned int cpu);
/*
* arch specific callback
*/
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 24997925ae00..dd84fed698a3 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1523,7 +1523,9 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
bool use_uncore_alias;
LIST_HEAD(config_terms);
- if (verbose > 1) {
+ pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
+
+ if (verbose > 1 && !(pmu && pmu->selectable)) {
fprintf(stderr, "Attempting to add event pmu '%s' with '",
name);
if (head_config) {
@@ -1536,7 +1538,6 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
fprintf(stderr, "' that may result in non-fatal errors\n");
}
- pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
if (!pmu) {
char *err_str;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 45a30040ec8d..f9a320694b85 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -2576,7 +2576,7 @@ int perf_session__process_events(struct perf_session *session)
if (perf_data__is_pipe(session->data))
return __perf_session__process_pipe_events(session);
- if (perf_data__is_dir(session->data))
+ if (perf_data__is_dir(session->data) && session->data->dir.nr)
return __perf_session__process_dir_events(session);
return __perf_session__process_events(session);
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index ee6f03481215..817a2de264b4 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
+#include <linux/err.h>
#include <inttypes.h>
#include <math.h>
#include <string.h>
@@ -311,7 +312,7 @@ static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
if (!mask) {
mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
- if (!mask)
+ if (IS_ERR(mask))
return -ENOMEM;
counter->per_pkg_mask = mask;
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 31cd59a2b66e..ecd377938eea 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1290,7 +1290,7 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
* For misannotated, zeroed, ASM function sizes.
*/
if (nr > 0) {
- symbols__fixup_end(&dso->symbols);
+ symbols__fixup_end(&dso->symbols, false);
symbols__fixup_duplicate(&dso->symbols);
if (kmap) {
/*
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index dea0fc495185..f72baf636724 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -101,11 +101,6 @@ static int prefix_underscores_count(const char *str)
return tail - str;
}
-void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
-{
- p->end = c->start;
-}
-
const char * __weak arch__normalize_symbol_name(const char *name)
{
return name;
@@ -217,7 +212,8 @@ again:
}
}
-void symbols__fixup_end(struct rb_root_cached *symbols)
+/* Update zero-sized symbols using the address of the next symbol */
+void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
{
struct rb_node *nd, *prevnd = rb_first_cached(symbols);
struct symbol *curr, *prev;
@@ -231,8 +227,29 @@ void symbols__fixup_end(struct rb_root_cached *symbols)
prev = curr;
curr = rb_entry(nd, struct symbol, rb_node);
- if (prev->end == prev->start || prev->end != curr->start)
- arch__symbols__fixup_end(prev, curr);
+ /*
+		 * On some architectures the kernel text segment starts at a
+		 * low memory address, while modules are located at high
+		 * memory addresses (or vice versa). The gap between the end
+		 * of the kernel text segment and the beginning of the first
+		 * module's text segment can be very big. Therefore do not
+		 * fill this gap and do not assign it to the kernel dso map
+		 * (kallsyms).
+		 *
+		 * In kallsyms, module symbols are identified by a '['
+		 * character in the name, as in:
+ * ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi]
+ */
+ if (prev->end == prev->start) {
+ /* Last kernel/module symbol mapped to end of page */
+ if (is_kallsyms && (!strchr(prev->name, '[') !=
+ !strchr(curr->name, '[')))
+ prev->end = roundup(prev->end + 4096, 4096);
+ else
+ prev->end = curr->start;
+
+ pr_debug4("%s sym:%s end:%#" PRIx64 "\n",
+ __func__, prev->name, prev->end);
+ }
}
/* Last entry */
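A worked example (addresses invented) of the kallsyms case handled above:

/*
 * prev: ffffffff9a000000 T _etext                    (no '[', size 0)
 * curr: ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi]
 *
 * Exactly one of the two names contains '[', so prev is the last kernel
 * symbol before the module gap: prev->end becomes
 * roundup(prev->end + 4096, 4096), i.e. the end of the next page,
 * instead of being extended all the way to curr->start.
 */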
@@ -1467,7 +1484,7 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename,
if (kallsyms__delta(kmap, filename, &delta))
return -1;
- symbols__fixup_end(&dso->symbols);
+ symbols__fixup_end(&dso->symbols, true);
symbols__fixup_duplicate(&dso->symbols);
if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
@@ -1659,7 +1676,7 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
#undef bfd_asymbol_section
#endif
- symbols__fixup_end(&dso->symbols);
+ symbols__fixup_end(&dso->symbols, false);
symbols__fixup_duplicate(&dso->symbols);
dso->adjust_symbols = 1;
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index fbf866d82dcc..0b893dcc8ea6 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -203,7 +203,7 @@ void __symbols__insert(struct rb_root_cached *symbols, struct symbol *sym,
bool kernel);
void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym);
void symbols__fixup_duplicate(struct rb_root_cached *symbols);
-void symbols__fixup_end(struct rb_root_cached *symbols);
+void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms);
void maps__fixup_end(struct maps *maps);
typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
@@ -241,7 +241,6 @@ const char *arch__normalize_symbol_name(const char *name);
#define SYMBOL_A 0
#define SYMBOL_B 1
-void arch__symbols__fixup_end(struct symbol *p, struct symbol *c);
int arch__compare_symbol_names(const char *namea, const char *nameb);
int arch__compare_symbol_names_n(const char *namea, const char *nameb,
unsigned int n);
diff --git a/tools/power/x86/intel-speed-select/Makefile b/tools/power/x86/intel-speed-select/Makefile
index 846f785e278d..7221f2f55e8b 100644
--- a/tools/power/x86/intel-speed-select/Makefile
+++ b/tools/power/x86/intel-speed-select/Makefile
@@ -42,7 +42,7 @@ ISST_IN := $(OUTPUT)intel-speed-select-in.o
$(ISST_IN): prepare FORCE
$(Q)$(MAKE) $(build)=intel-speed-select
$(OUTPUT)intel-speed-select: $(ISST_IN)
- $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+ $(QUIET_LINK)$(CC) $(CFLAGS) $< $(LDFLAGS) -o $@
clean:
rm -f $(ALL_PROGRAMS)
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index 81539f543954..d5c1bcba86fe 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -25,7 +25,8 @@ struct kmem_cache {
void (*ctor)(void *);
};
-void *kmem_cache_alloc(struct kmem_cache *cachep, int gfp)
+void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
+ int gfp)
{
void *p;
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh
index 429f7ee735cf..fd23c80eba31 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh
@@ -159,6 +159,17 @@ flooding_remotes_add()
local lsb
local i
+ # Prevent unwanted packets from entering the bridge and interfering
+ # with the test.
+ tc qdisc add dev br0 clsact
+ tc filter add dev br0 egress protocol all pref 1 handle 1 \
+ matchall skip_hw action drop
+ tc qdisc add dev $h1 clsact
+ tc filter add dev $h1 egress protocol all pref 1 handle 1 \
+ flower skip_hw dst_mac de:ad:be:ef:13:37 action pass
+ tc filter add dev $h1 egress protocol all pref 2 handle 2 \
+ matchall skip_hw action drop
+
for i in $(eval echo {1..$num_remotes}); do
lsb=$((i + 1))
@@ -195,6 +206,12 @@ flooding_filters_del()
done
tc qdisc del dev $rp2 clsact
+
+ tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall
+ tc filter del dev $h1 egress protocol all pref 1 handle 1 flower
+ tc qdisc del dev $h1 clsact
+ tc filter del dev br0 egress protocol all pref 1 handle 1 matchall
+ tc qdisc del dev br0 clsact
}
flooding_check_packets()
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
index fedcb7b35af9..af5ea50ed5c0 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
@@ -172,6 +172,17 @@ flooding_filters_add()
local lsb
local i
+ # Prevent unwanted packets from entering the bridge and interfering
+ # with the test.
+ tc qdisc add dev br0 clsact
+ tc filter add dev br0 egress protocol all pref 1 handle 1 \
+ matchall skip_hw action drop
+ tc qdisc add dev $h1 clsact
+ tc filter add dev $h1 egress protocol all pref 1 handle 1 \
+ flower skip_hw dst_mac de:ad:be:ef:13:37 action pass
+ tc filter add dev $h1 egress protocol all pref 2 handle 2 \
+ matchall skip_hw action drop
+
tc qdisc add dev $rp2 clsact
for i in $(eval echo {1..$num_remotes}); do
@@ -194,6 +205,12 @@ flooding_filters_del()
done
tc qdisc del dev $rp2 clsact
+
+ tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall
+ tc filter del dev $h1 egress protocol all pref 1 handle 1 flower
+ tc qdisc del dev $h1 clsact
+ tc filter del dev br0 egress protocol all pref 1 handle 1 matchall
+ tc qdisc del dev br0 clsact
}
flooding_check_packets()
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index d1e8f5237469..0b0e4402bba6 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -3,6 +3,7 @@
/aarch64/debug-exceptions
/aarch64/get-reg-list
/aarch64/psci_cpu_on_test
+/aarch64/vcpu_width_config
/aarch64/vgic_init
/aarch64/vgic_irq
/s390x/memop
@@ -33,6 +34,7 @@
/x86_64/state_test
/x86_64/svm_vmcall_test
/x86_64/svm_int_ctl_test
+/x86_64/tsc_scaling_sync
/x86_64/sync_regs_test
/x86_64/tsc_msrs_test
/x86_64/userspace_io_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 21c2dbd21a81..681b173aa87c 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -106,6 +106,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test
+TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
TEST_GEN_PROGS_aarch64 += demand_paging_test
diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c
index b08d30bf71c5..3b940a101bc0 100644
--- a/tools/testing/selftests/kvm/aarch64/arch_timer.c
+++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c
@@ -362,11 +362,12 @@ static void test_init_timer_irq(struct kvm_vm *vm)
pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
}
+static int gic_fd;
+
static struct kvm_vm *test_vm_create(void)
{
struct kvm_vm *vm;
unsigned int i;
- int ret;
int nr_vcpus = test_args.nr_vcpus;
vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
@@ -383,8 +384,8 @@ static struct kvm_vm *test_vm_create(void)
ucall_init(vm, NULL);
test_init_timer_irq(vm);
- ret = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
- if (ret < 0) {
+ gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
+ if (gic_fd < 0) {
print_skip("Failed to create vgic-v3");
exit(KSFT_SKIP);
}
@@ -395,6 +396,12 @@ static struct kvm_vm *test_vm_create(void)
return vm;
}
+static void test_vm_cleanup(struct kvm_vm *vm)
+{
+ close(gic_fd);
+ kvm_vm_free(vm);
+}
+
static void test_print_help(char *name)
{
pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n",
@@ -478,7 +485,7 @@ int main(int argc, char *argv[])
vm = test_vm_create();
test_run(vm);
- kvm_vm_free(vm);
+ test_vm_cleanup(vm);
return 0;
}
diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
index f12147c43464..0b571f3fe64c 100644
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
@@ -503,8 +503,13 @@ static void run_test(struct vcpu_config *c)
++missing_regs;
if (new_regs || missing_regs) {
+ n = 0;
+ for_each_reg_filtered(i)
+ ++n;
+
printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
- printf("%s: Number registers: %5lld\n", config_name(c), reg_list->n);
+ printf("%s: Number registers: %5lld (includes %lld filtered registers)\n",
+ config_name(c), reg_list->n, reg_list->n - n);
}
if (new_regs) {
@@ -683,9 +688,10 @@ static __u64 base_regs[] = {
KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
- KVM_REG_ARM_FW_REG(0),
- KVM_REG_ARM_FW_REG(1),
- KVM_REG_ARM_FW_REG(2),
+ KVM_REG_ARM_FW_REG(0), /* KVM_REG_ARM_PSCI_VERSION */
+ KVM_REG_ARM_FW_REG(1), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 */
+ KVM_REG_ARM_FW_REG(2), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 */
+ KVM_REG_ARM_FW_REG(3), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 */
ARM64_SYS_REG(3, 3, 14, 3, 1), /* CNTV_CTL_EL0 */
ARM64_SYS_REG(3, 3, 14, 3, 2), /* CNTV_CVAL_EL0 */
ARM64_SYS_REG(3, 3, 14, 0, 2),
diff --git a/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c b/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c
new file mode 100644
index 000000000000..6e9402679229
--- /dev/null
+++ b/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vcpu_width_config - Test KVM_ARM_VCPU_INIT() with KVM_ARM_VCPU_EL1_32BIT.
+ *
+ * Copyright (c) 2022 Google LLC.
+ *
+ * This is a test that ensures that non-mixed-width vCPUs (all 64bit vCPUs
+ * or all 32bit vCPUs) can be configured and mixed-width vCPUs cannot be
+ * configured.
+ */
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "test_util.h"
+
+
+/*
+ * Add a vCPU, run KVM_ARM_VCPU_INIT with @init1, then add
+ * another vCPU and run KVM_ARM_VCPU_INIT with @init2.
+ */
+static int add_init_2vcpus(struct kvm_vcpu_init *init1,
+ struct kvm_vcpu_init *init2)
+{
+ struct kvm_vm *vm;
+ int ret;
+
+ vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+
+ vm_vcpu_add(vm, 0);
+ ret = _vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
+ if (ret)
+ goto free_exit;
+
+ vm_vcpu_add(vm, 1);
+ ret = _vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
+
+free_exit:
+ kvm_vm_free(vm);
+ return ret;
+}
+
+/*
+ * Add two vCPUs, then run KVM_ARM_VCPU_INIT for one vCPU with @init1,
+ * and run KVM_ARM_VCPU_INIT for another vCPU with @init2.
+ */
+static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init1,
+ struct kvm_vcpu_init *init2)
+{
+ struct kvm_vm *vm;
+ int ret;
+
+ vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+
+ vm_vcpu_add(vm, 0);
+ vm_vcpu_add(vm, 1);
+
+ ret = _vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
+ if (ret)
+ goto free_exit;
+
+ ret = _vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
+
+free_exit:
+ kvm_vm_free(vm);
+ return ret;
+}
+
+/*
+ * Tests that two 64bit vCPUs can be configured, two 32bit vCPUs can be
+ * configured, and two mixed-width vCPUs cannot be configured.
+ * For each of those three cases, vCPUs are configured in two different
+ * orders. The first runs KVM_CREATE_VCPU for both vCPUs and then runs
+ * KVM_ARM_VCPU_INIT for them.
+ * The second runs KVM_CREATE_VCPU and KVM_ARM_VCPU_INIT for one vCPU,
+ * and then runs those commands for the other vCPU.
+ */
+int main(void)
+{
+ struct kvm_vcpu_init init1, init2;
+ struct kvm_vm *vm;
+ int ret;
+
+ if (!kvm_check_cap(KVM_CAP_ARM_EL1_32BIT)) {
+ print_skip("KVM_CAP_ARM_EL1_32BIT is not supported");
+ exit(KSFT_SKIP);
+ }
+
+ /* Get the preferred target type and copy that to init2 for later use */
+ vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+ vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init1);
+ kvm_vm_free(vm);
+ init2 = init1;
+
+ /* Test with 64bit vCPUs */
+ ret = add_init_2vcpus(&init1, &init1);
+ TEST_ASSERT(ret == 0,
+ "Configuring 64bit EL1 vCPUs failed unexpectedly");
+ ret = add_2vcpus_init_2vcpus(&init1, &init1);
+ TEST_ASSERT(ret == 0,
+ "Configuring 64bit EL1 vCPUs failed unexpectedly");
+
+ /* Test with 32bit vCPUs */
+ init1.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
+ ret = add_init_2vcpus(&init1, &init1);
+ TEST_ASSERT(ret == 0,
+ "Configuring 32bit EL1 vCPUs failed unexpectedly");
+ ret = add_2vcpus_init_2vcpus(&init1, &init1);
+ TEST_ASSERT(ret == 0,
+ "Configuring 32bit EL1 vCPUs failed unexpectedly");
+
+ /* Test with mixed-width vCPUs */
+ init1.features[0] = 0;
+ init2.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
+ ret = add_init_2vcpus(&init1, &init2);
+ TEST_ASSERT(ret != 0,
+ "Configuring mixed-width vCPUs worked unexpectedly");
+ ret = add_2vcpus_init_2vcpus(&init1, &init2);
+ TEST_ASSERT(ret != 0,
+ "Configuring mixed-width vCPUs worked unexpectedly");
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index c9d9e513ca04..7b47ae4f952e 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -18,11 +18,40 @@
#include "test_util.h"
#include "perf_test_util.h"
#include "guest_modes.h"
+
#ifdef __aarch64__
#include "aarch64/vgic.h"
#define GICD_BASE_GPA 0x8000000ULL
#define GICR_BASE_GPA 0x80A0000ULL
+
+static int gic_fd;
+
+static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
+{
+ /*
+ * The test can still run even if hardware does not support GICv3, as it
+ * is only an optimization to reduce guest exits.
+ */
+ gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
+}
+
+static void arch_cleanup_vm(struct kvm_vm *vm)
+{
+ if (gic_fd > 0)
+ close(gic_fd);
+}
+
+#else /* __aarch64__ */
+
+static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
+{
+}
+
+static void arch_cleanup_vm(struct kvm_vm *vm)
+{
+}
+
#endif
/* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
@@ -206,9 +235,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
vm_enable_cap(vm, &cap);
}
-#ifdef __aarch64__
- vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
-#endif
+ arch_setup_vm(vm, nr_vcpus);
/* Start the iterations */
iteration = 0;
@@ -302,6 +329,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
}
free_bitmaps(bitmaps, p->slots);
+ arch_cleanup_vm(vm);
perf_test_destroy_vm(vm);
}
diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h
index dc284c6bdbc3..eca5c622efd2 100644
--- a/tools/testing/selftests/kvm/include/riscv/processor.h
+++ b/tools/testing/selftests/kvm/include/riscv/processor.h
@@ -101,7 +101,9 @@ static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id,
#define PGTBL_PTE_WRITE_SHIFT 2
#define PGTBL_PTE_READ_MASK 0x0000000000000002ULL
#define PGTBL_PTE_READ_SHIFT 1
-#define PGTBL_PTE_PERM_MASK (PGTBL_PTE_EXECUTE_MASK | \
+#define PGTBL_PTE_PERM_MASK (PGTBL_PTE_ACCESSED_MASK | \
+ PGTBL_PTE_DIRTY_MASK | \
+ PGTBL_PTE_EXECUTE_MASK | \
PGTBL_PTE_WRITE_MASK | \
PGTBL_PTE_READ_MASK)
#define PGTBL_PTE_VALID_MASK 0x0000000000000001ULL
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 37db341d4cc5..d0d51adec76e 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -60,6 +60,23 @@
/* CPUID.0x8000_0001.EDX */
#define CPUID_GBPAGES (1ul << 26)
+/* Page table bitfield declarations */
+#define PTE_PRESENT_MASK BIT_ULL(0)
+#define PTE_WRITABLE_MASK BIT_ULL(1)
+#define PTE_USER_MASK BIT_ULL(2)
+#define PTE_ACCESSED_MASK BIT_ULL(5)
+#define PTE_DIRTY_MASK BIT_ULL(6)
+#define PTE_LARGE_MASK BIT_ULL(7)
+#define PTE_GLOBAL_MASK BIT_ULL(8)
+#define PTE_NX_MASK BIT_ULL(63)
+
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1ULL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#define PHYSICAL_PAGE_MASK GENMASK_ULL(51, 12)
+#define PTE_GET_PFN(pte) (((pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+
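A minimal sketch (paddr invented; assumes the masks defined above are in scope along with stdint.h) of composing and decoding a 4K PTE:

static void pte_example(void)
{
	uint64_t paddr = 0x123456000ULL;
	/* Present, writable 4K mapping of paddr. */
	uint64_t pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK |
		       (paddr & PHYSICAL_PAGE_MASK);
	uint64_t pfn = PTE_GET_PFN(pte);	/* 0x123456 */

	(void)pfn;
}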
/* General Registers in 64-Bit Mode */
struct gpr64_regs {
u64 rax;
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
index ba1fdc3dcf4a..2c4a7563a4f8 100644
--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -278,7 +278,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
else
guest_test_phys_mem = p->phys_offset;
#ifdef __s390x__
- alignment = max(0x100000, alignment);
+ alignment = max(0x100000UL, alignment);
#endif
guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);
diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c
index d377f2603d98..3961487a4870 100644
--- a/tools/testing/selftests/kvm/lib/riscv/processor.c
+++ b/tools/testing/selftests/kvm/lib/riscv/processor.c
@@ -268,7 +268,7 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6);
}
-static void guest_hang(void)
+static void __aligned(16) guest_hang(void)
{
while (1)
;
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 9f000dfb5594..33ea5e9955d9 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -19,38 +19,6 @@
vm_vaddr_t exception_handlers;
-/* Virtual translation table structure declarations */
-struct pageUpperEntry {
- uint64_t present:1;
- uint64_t writable:1;
- uint64_t user:1;
- uint64_t write_through:1;
- uint64_t cache_disable:1;
- uint64_t accessed:1;
- uint64_t ignored_06:1;
- uint64_t page_size:1;
- uint64_t ignored_11_08:4;
- uint64_t pfn:40;
- uint64_t ignored_62_52:11;
- uint64_t execute_disable:1;
-};
-
-struct pageTableEntry {
- uint64_t present:1;
- uint64_t writable:1;
- uint64_t user:1;
- uint64_t write_through:1;
- uint64_t cache_disable:1;
- uint64_t accessed:1;
- uint64_t dirty:1;
- uint64_t reserved_07:1;
- uint64_t global:1;
- uint64_t ignored_11_09:3;
- uint64_t pfn:40;
- uint64_t ignored_62_52:11;
- uint64_t execute_disable:1;
-};
-
void regs_dump(FILE *stream, struct kvm_regs *regs,
uint8_t indent)
{
@@ -195,23 +163,21 @@ static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
return &page_table[index];
}
-static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
- uint64_t pt_pfn,
- uint64_t vaddr,
- uint64_t paddr,
- int level,
- enum x86_page_size page_size)
+static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
+ uint64_t pt_pfn,
+ uint64_t vaddr,
+ uint64_t paddr,
+ int level,
+ enum x86_page_size page_size)
{
- struct pageUpperEntry *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
-
- if (!pte->present) {
- pte->writable = true;
- pte->present = true;
- pte->page_size = (level == page_size);
- if (pte->page_size)
- pte->pfn = paddr >> vm->page_shift;
+ uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
+
+ if (!(*pte & PTE_PRESENT_MASK)) {
+ *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
+ if (level == page_size)
+ *pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK);
else
- pte->pfn = vm_alloc_page_table(vm) >> vm->page_shift;
+ *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
} else {
/*
* Entry already present. Assert that the caller doesn't want
@@ -221,7 +187,7 @@ static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
TEST_ASSERT(level != page_size,
"Cannot create hugepage at level: %u, vaddr: 0x%lx\n",
page_size, vaddr);
- TEST_ASSERT(!pte->page_size,
+ TEST_ASSERT(!(*pte & PTE_LARGE_MASK),
"Cannot create page table at level: %u, vaddr: 0x%lx\n",
level, vaddr);
}
@@ -232,8 +198,8 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
enum x86_page_size page_size)
{
const uint64_t pg_size = 1ull << ((page_size * 9) + 12);
- struct pageUpperEntry *pml4e, *pdpe, *pde;
- struct pageTableEntry *pte;
+ uint64_t *pml4e, *pdpe, *pde;
+ uint64_t *pte;
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
"Unknown or unsupported guest mode, mode: 0x%x", vm->mode);
@@ -257,24 +223,22 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
*/
pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
vaddr, paddr, 3, page_size);
- if (pml4e->page_size)
+ if (*pml4e & PTE_LARGE_MASK)
return;
- pdpe = virt_create_upper_pte(vm, pml4e->pfn, vaddr, paddr, 2, page_size);
- if (pdpe->page_size)
+ pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, 2, page_size);
+ if (*pdpe & PTE_LARGE_MASK)
return;
- pde = virt_create_upper_pte(vm, pdpe->pfn, vaddr, paddr, 1, page_size);
- if (pde->page_size)
+ pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, 1, page_size);
+ if (*pde & PTE_LARGE_MASK)
return;
/* Fill in page table entry. */
- pte = virt_get_pte(vm, pde->pfn, vaddr, 0);
- TEST_ASSERT(!pte->present,
+ pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, 0);
+ TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
"PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
- pte->pfn = paddr >> vm->page_shift;
- pte->writable = true;
- pte->present = 1;
+ *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
}
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
@@ -282,22 +246,22 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
__virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_4K);
}
-static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
+static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
uint64_t vaddr)
{
uint16_t index[4];
- struct pageUpperEntry *pml4e, *pdpe, *pde;
- struct pageTableEntry *pte;
+ uint64_t *pml4e, *pdpe, *pde;
+ uint64_t *pte;
struct kvm_cpuid_entry2 *entry;
struct kvm_sregs sregs;
int max_phy_addr;
- /* Set the bottom 52 bits. */
- uint64_t rsvd_mask = 0x000fffffffffffff;
+ uint64_t rsvd_mask = 0;
entry = kvm_get_supported_cpuid_index(0x80000008, 0);
max_phy_addr = entry->eax & 0x000000ff;
- /* Clear the bottom bits of the reserved mask. */
- rsvd_mask = (rsvd_mask >> max_phy_addr) << max_phy_addr;
+ /* Set the high bits in the reserved mask. */
+ if (max_phy_addr < 52)
+ rsvd_mask = GENMASK_ULL(51, max_phy_addr);
/*
* SDM vol 3, fig 4-11 "Formats of CR3 and Paging-Structure Entries
@@ -307,7 +271,7 @@ static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vc
*/
vcpu_sregs_get(vm, vcpuid, &sregs);
if ((sregs.efer & EFER_NX) == 0) {
- rsvd_mask |= (1ull << 63);
+ rsvd_mask |= PTE_NX_MASK;
}
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
@@ -329,30 +293,29 @@ static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vc
index[3] = (vaddr >> 39) & 0x1ffu;
pml4e = addr_gpa2hva(vm, vm->pgd);
- TEST_ASSERT(pml4e[index[3]].present,
+ TEST_ASSERT(pml4e[index[3]] & PTE_PRESENT_MASK,
"Expected pml4e to be present for gva: 0x%08lx", vaddr);
- TEST_ASSERT((*(uint64_t*)(&pml4e[index[3]]) &
- (rsvd_mask | (1ull << 7))) == 0,
+ TEST_ASSERT((pml4e[index[3]] & (rsvd_mask | PTE_LARGE_MASK)) == 0,
"Unexpected reserved bits set.");
- pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size);
- TEST_ASSERT(pdpe[index[2]].present,
+ pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
+ TEST_ASSERT(pdpe[index[2]] & PTE_PRESENT_MASK,
"Expected pdpe to be present for gva: 0x%08lx", vaddr);
- TEST_ASSERT(pdpe[index[2]].page_size == 0,
+ TEST_ASSERT(!(pdpe[index[2]] & PTE_LARGE_MASK),
"Expected pdpe to map a pde not a 1-GByte page.");
- TEST_ASSERT((*(uint64_t*)(&pdpe[index[2]]) & rsvd_mask) == 0,
+ TEST_ASSERT((pdpe[index[2]] & rsvd_mask) == 0,
"Unexpected reserved bits set.");
- pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size);
- TEST_ASSERT(pde[index[1]].present,
+ pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
+ TEST_ASSERT(pde[index[1]] & PTE_PRESENT_MASK,
"Expected pde to be present for gva: 0x%08lx", vaddr);
- TEST_ASSERT(pde[index[1]].page_size == 0,
+ TEST_ASSERT(!(pde[index[1]] & PTE_LARGE_MASK),
"Expected pde to map a pte not a 2-MByte page.");
- TEST_ASSERT((*(uint64_t*)(&pde[index[1]]) & rsvd_mask) == 0,
+ TEST_ASSERT((pde[index[1]] & rsvd_mask) == 0,
"Unexpected reserved bits set.");
- pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size);
- TEST_ASSERT(pte[index[0]].present,
+ pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
+ TEST_ASSERT(pte[index[0]] & PTE_PRESENT_MASK,
"Expected pte to be present for gva: 0x%08lx", vaddr);
return &pte[index[0]];
@@ -360,7 +323,7 @@ static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vc
uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr)
{
- struct pageTableEntry *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
+ uint64_t *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
return *(uint64_t *)pte;
}
@@ -368,18 +331,17 @@ uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr)
void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr,
uint64_t pte)
{
- struct pageTableEntry *new_pte = _vm_get_page_table_entry(vm, vcpuid,
- vaddr);
+ uint64_t *new_pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
*(uint64_t *)new_pte = pte;
}
void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
- struct pageUpperEntry *pml4e, *pml4e_start;
- struct pageUpperEntry *pdpe, *pdpe_start;
- struct pageUpperEntry *pde, *pde_start;
- struct pageTableEntry *pte, *pte_start;
+ uint64_t *pml4e, *pml4e_start;
+ uint64_t *pdpe, *pdpe_start;
+ uint64_t *pde, *pde_start;
+ uint64_t *pte, *pte_start;
if (!vm->pgd_created)
return;
@@ -389,58 +351,58 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
fprintf(stream, "%*s index hvaddr gpaddr "
"addr w exec dirty\n",
indent, "");
- pml4e_start = (struct pageUpperEntry *) addr_gpa2hva(vm, vm->pgd);
+ pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->pgd);
for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
pml4e = &pml4e_start[n1];
- if (!pml4e->present)
+ if (!(*pml4e & PTE_PRESENT_MASK))
continue;
- fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10lx %u "
+ fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u "
" %u\n",
indent, "",
pml4e - pml4e_start, pml4e,
- addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->pfn,
- pml4e->writable, pml4e->execute_disable);
+ addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e),
+ !!(*pml4e & PTE_WRITABLE_MASK), !!(*pml4e & PTE_NX_MASK));
- pdpe_start = addr_gpa2hva(vm, pml4e->pfn * vm->page_size);
+ pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
pdpe = &pdpe_start[n2];
- if (!pdpe->present)
+ if (!(*pdpe & PTE_PRESENT_MASK))
continue;
- fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10lx "
+ fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10llx "
"%u %u\n",
indent, "",
pdpe - pdpe_start, pdpe,
addr_hva2gpa(vm, pdpe),
- (uint64_t) pdpe->pfn, pdpe->writable,
- pdpe->execute_disable);
+ PTE_GET_PFN(*pdpe), !!(*pdpe & PTE_WRITABLE_MASK),
+ !!(*pdpe & PTE_NX_MASK));
- pde_start = addr_gpa2hva(vm, pdpe->pfn * vm->page_size);
+ pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
pde = &pde_start[n3];
- if (!pde->present)
+ if (!(*pde & PTE_PRESENT_MASK))
continue;
fprintf(stream, "%*spde 0x%-3zx %p "
- "0x%-12lx 0x%-10lx %u %u\n",
+ "0x%-12lx 0x%-10llx %u %u\n",
indent, "", pde - pde_start, pde,
addr_hva2gpa(vm, pde),
- (uint64_t) pde->pfn, pde->writable,
- pde->execute_disable);
+ PTE_GET_PFN(*pde), !!(*pde & PTE_WRITABLE_MASK),
+ !!(*pde & PTE_NX_MASK));
- pte_start = addr_gpa2hva(vm, pde->pfn * vm->page_size);
+ pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
pte = &pte_start[n4];
- if (!pte->present)
+ if (!(*pte & PTE_PRESENT_MASK))
continue;
fprintf(stream, "%*spte 0x%-3zx %p "
- "0x%-12lx 0x%-10lx %u %u "
+ "0x%-12lx 0x%-10llx %u %u "
" %u 0x%-10lx\n",
indent, "",
pte - pte_start, pte,
addr_hva2gpa(vm, pte),
- (uint64_t) pte->pfn,
- pte->writable,
- pte->execute_disable,
- pte->dirty,
+ PTE_GET_PFN(*pte),
+ !!(*pte & PTE_WRITABLE_MASK),
+ !!(*pte & PTE_NX_MASK),
+ !!(*pte & PTE_DIRTY_MASK),
((uint64_t) n1 << 27)
| ((uint64_t) n2 << 18)
| ((uint64_t) n3 << 9)
@@ -558,8 +520,8 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
uint16_t index[4];
- struct pageUpperEntry *pml4e, *pdpe, *pde;
- struct pageTableEntry *pte;
+ uint64_t *pml4e, *pdpe, *pde;
+ uint64_t *pte;
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
@@ -572,22 +534,22 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
if (!vm->pgd_created)
goto unmapped_gva;
pml4e = addr_gpa2hva(vm, vm->pgd);
- if (!pml4e[index[3]].present)
+ if (!(pml4e[index[3]] & PTE_PRESENT_MASK))
goto unmapped_gva;
- pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size);
- if (!pdpe[index[2]].present)
+ pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
+ if (!(pdpe[index[2]] & PTE_PRESENT_MASK))
goto unmapped_gva;
- pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size);
- if (!pde[index[1]].present)
+ pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
+ if (!(pde[index[1]] & PTE_PRESENT_MASK))
goto unmapped_gva;
- pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size);
- if (!pte[index[0]].present)
+ pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
+ if (!(pte[index[0]] & PTE_PRESENT_MASK))
goto unmapped_gva;
- return (pte[index[0]].pfn * vm->page_size) + (gva & 0xfffu);
+ return (PTE_GET_PFN(pte[index[0]]) * vm->page_size) + (gva & ~PAGE_MASK);
unmapped_gva:
TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
diff --git a/tools/testing/selftests/kvm/x86_64/amx_test.c b/tools/testing/selftests/kvm/x86_64/amx_test.c
index 52a3ef6629e8..76f65c22796f 100644
--- a/tools/testing/selftests/kvm/x86_64/amx_test.c
+++ b/tools/testing/selftests/kvm/x86_64/amx_test.c
@@ -29,7 +29,6 @@
#define X86_FEATURE_XSAVE (1 << 26)
#define X86_FEATURE_OSXSAVE (1 << 27)
-#define PAGE_SIZE (1 << 12)
#define NUM_TILES 8
#define TILE_SIZE 1024
#define XSAVE_SIZE ((NUM_TILES * TILE_SIZE) + PAGE_SIZE)
diff --git a/tools/testing/selftests/kvm/x86_64/emulator_error_test.c b/tools/testing/selftests/kvm/x86_64/emulator_error_test.c
index f070ff0224fa..aeb3850f81bd 100644
--- a/tools/testing/selftests/kvm/x86_64/emulator_error_test.c
+++ b/tools/testing/selftests/kvm/x86_64/emulator_error_test.c
@@ -12,7 +12,6 @@
#include "vmx.h"
#define VCPU_ID 1
-#define PAGE_SIZE 4096
#define MAXPHYADDR 36
#define MEM_REGION_GVA 0x0000123456789000
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
index a626d40fdb48..b4e0c860769e 100644
--- a/tools/testing/selftests/kvm/x86_64/smm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
@@ -21,8 +21,6 @@
#define VCPU_ID 1
-#define PAGE_SIZE 4096
-
#define SMRAM_SIZE 65536
#define SMRAM_MEMSLOT ((1 << 16) | 1)
#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index e683d0ac3e45..19b35c607dc6 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
@@ -32,7 +32,6 @@
#define MSR_IA32_TSC_ADJUST 0x3b
#endif
-#define PAGE_SIZE 4096
#define VCPU_ID 5
#define TSC_ADJUST_VALUE (1ll << 32)
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index 865e17146815..bcd370827859 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -23,7 +23,6 @@
#define SHINFO_REGION_GVA 0xc0000000ULL
#define SHINFO_REGION_GPA 0xc0000000ULL
#define SHINFO_REGION_SLOT 10
-#define PAGE_SIZE 4096
#define DUMMY_REGION_GPA (SHINFO_REGION_GPA + (2 * PAGE_SIZE))
#define DUMMY_REGION_SLOT 11
diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
index adc94452b57c..b30fe9de1d4f 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
@@ -15,7 +15,6 @@
#define HCALL_REGION_GPA 0xc0000000ULL
#define HCALL_REGION_SLOT 10
-#define PAGE_SIZE 4096
static struct kvm_vm *vm;
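All six PAGE_SIZE removals above (amx_test, emulator_error_test, smm_test, vmx_tsc_adjust_test, xen_shinfo_test, xen_vmcall_test) rely on a single shared definition, presumably supplied by a common selftest header, instead of each test redefining the constant locally. A minimal sketch of the shared-constant pattern; the header name here is hypothetical:

    /* selftest_page.h: hypothetical shared header */
    #ifndef SELFTEST_PAGE_H
    #define SELFTEST_PAGE_H

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    #endif /* SELFTEST_PAGE_H */

Centralizing the definition avoids the silent divergence that scattered duplicates invite, and matches the (gva & ~PAGE_MASK) offset extraction seen in addr_gva2gpa() earlier in this diff.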
diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c
index b019e0b8221c..84fda3b49073 100644
--- a/tools/testing/selftests/mqueue/mq_perf_tests.c
+++ b/tools/testing/selftests/mqueue/mq_perf_tests.c
@@ -180,6 +180,9 @@ void shutdown(int exit_val, char *err_cause, int line_no)
if (in_shutdown++)
return;
+ /* Free the cpu_set allocated using CPU_ALLOC in the main function */
+ CPU_FREE(cpu_set);
+
for (i = 0; i < num_cpus_to_pin; i++)
if (cpu_threads[i]) {
pthread_kill(cpu_threads[i], SIGUSR1);
@@ -551,6 +554,12 @@ int main(int argc, char *argv[])
perror("sysconf(_SC_NPROCESSORS_ONLN)");
exit(1);
}
+
+ if (getuid() != 0)
+ ksft_exit_skip("Not running as root, but almost all tests "
+ "require root in order to modify\nsystem settings. "
+ "Exiting.\n");
+
cpus_online = min(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN));
cpu_set = CPU_ALLOC(cpus_online);
if (cpu_set == NULL) {
@@ -589,7 +598,7 @@ int main(int argc, char *argv[])
cpu_set)) {
fprintf(stderr, "Any given CPU may "
"only be given once.\n");
- exit(1);
+ goto err_code;
} else
CPU_SET_S(cpus_to_pin[cpu],
cpu_set_size, cpu_set);
@@ -607,7 +616,7 @@ int main(int argc, char *argv[])
queue_path = malloc(strlen(option) + 2);
if (!queue_path) {
perror("malloc()");
- exit(1);
+ goto err_code;
}
queue_path[0] = '/';
queue_path[1] = 0;
@@ -622,17 +631,12 @@ int main(int argc, char *argv[])
fprintf(stderr, "Must pass at least one CPU to continuous "
"mode.\n");
poptPrintUsage(popt_context, stderr, 0);
- exit(1);
+ goto err_code;
} else if (!continuous_mode) {
num_cpus_to_pin = 1;
cpus_to_pin[0] = cpus_online - 1;
}
- if (getuid() != 0)
- ksft_exit_skip("Not running as root, but almost all tests "
- "require root in order to modify\nsystem settings. "
- "Exiting.\n");
-
max_msgs = fopen(MAX_MSGS, "r+");
max_msgsize = fopen(MAX_MSGSIZE, "r+");
if (!max_msgs)
@@ -740,4 +744,9 @@ int main(int argc, char *argv[])
sleep(1);
}
shutdown(0, "", 0);
+
+err_code:
+ CPU_FREE(cpu_set);
+ exit(1);
+
}
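The mq_perf_tests conversion above funnels every post-allocation error path through one err_code label so the CPU_ALLOC'd set is always released. A minimal sketch of this centralized-cleanup idiom, with a hypothetical do_work() standing in for the real option handling:

    #define _GNU_SOURCE
    #include <sched.h>

    /* Hypothetical work step; stands in for parsing and pinning. */
    static int do_work(cpu_set_t *set)
    {
        (void)set;
        return 0;
    }

    static int run_pinned(int ncpus)
    {
        cpu_set_t *set = CPU_ALLOC(ncpus);

        if (!set)
            return 1;
        if (do_work(set))
            goto err_code;   /* every failure funnels through one label */
        CPU_FREE(set);
        return 0;

    err_code:
        CPU_FREE(set);       /* single cleanup point, as in the patch */
        return 1;
    }

    int main(void)
    {
        return run_pinned(4);
    }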
diff --git a/tools/testing/selftests/vm/mremap_test.c b/tools/testing/selftests/vm/mremap_test.c
index 7c0b0617b9f8..db0270127aeb 100644
--- a/tools/testing/selftests/vm/mremap_test.c
+++ b/tools/testing/selftests/vm/mremap_test.c
@@ -6,9 +6,11 @@
#include <errno.h>
#include <stdlib.h>
+#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <time.h>
+#include <stdbool.h>
#include "../kselftest.h"
@@ -64,6 +66,59 @@ enum {
}
/*
+ * Returns false if the requested remap region overlaps with an
+ * existing mapping (e.g. text, stack), else returns true.
+ */
+static bool is_remap_region_valid(void *addr, unsigned long long size)
+{
+ void *remap_addr = NULL;
+ bool ret = true;
+
+ /* Use MAP_FIXED_NOREPLACE flag to ensure region is not mapped */
+ remap_addr = mmap(addr, size, PROT_READ | PROT_WRITE,
+ MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
+ -1, 0);
+
+ if (remap_addr == MAP_FAILED) {
+ if (errno == EEXIST)
+ ret = false;
+ } else {
+ munmap(remap_addr, size);
+ }
+
+ return ret;
+}
+
+/* Returns mmap_min_addr sysctl tunable from procfs */
+static unsigned long long get_mmap_min_addr(void)
+{
+ FILE *fp;
+ int n_matched;
+ static unsigned long long addr;
+
+ if (addr)
+ return addr;
+
+ fp = fopen("/proc/sys/vm/mmap_min_addr", "r");
+ if (fp == NULL) {
+ ksft_print_msg("Failed to open /proc/sys/vm/mmap_min_addr: %s\n",
+ strerror(errno));
+ exit(KSFT_SKIP);
+ }
+
+ n_matched = fscanf(fp, "%llu", &addr);
+ if (n_matched != 1) {
+ ksft_print_msg("Failed to read /proc/sys/vm/mmap_min_addr: %s\n",
+ strerror(errno));
+ fclose(fp);
+ exit(KSFT_SKIP);
+ }
+
+ fclose(fp);
+ return addr;
+}
+
+/*
* Returns the start address of the mapping on success, else returns
* NULL on failure.
*/
@@ -71,11 +126,18 @@ static void *get_source_mapping(struct config c)
{
unsigned long long addr = 0ULL;
void *src_addr = NULL;
+ unsigned long long mmap_min_addr;
+
+ mmap_min_addr = get_mmap_min_addr();
+
retry:
addr += c.src_alignment;
+ if (addr < mmap_min_addr)
+ goto retry;
+
src_addr = mmap((void *) addr, c.region_size, PROT_READ | PROT_WRITE,
- MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
- -1, 0);
+ MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
+ -1, 0);
if (src_addr == MAP_FAILED) {
if (errno == EPERM || errno == EEXIST)
goto retry;
@@ -90,8 +152,10 @@ retry:
* alignment in the tests.
*/
if (((unsigned long long) src_addr & (c.src_alignment - 1)) ||
- !((unsigned long long) src_addr & c.src_alignment))
+ !((unsigned long long) src_addr & c.src_alignment)) {
+ munmap(src_addr, c.region_size);
goto retry;
+ }
if (!src_addr)
goto error;
@@ -140,9 +204,20 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
if (!((unsigned long long) addr & c.dest_alignment))
addr = (void *) ((unsigned long long) addr | c.dest_alignment);
+ /* Don't destroy existing mappings unless expected to overlap */
+ while (!is_remap_region_valid(addr, c.region_size) && !c.overlapping) {
+ /* Check for unsigned overflow */
+ if (addr + c.dest_alignment < addr) {
+ ksft_print_msg("Couldn't find a valid region to remap to\n");
+ ret = -1;
+ goto out;
+ }
+ addr += c.dest_alignment;
+ }
+
clock_gettime(CLOCK_MONOTONIC, &t_start);
dest_addr = mremap(src_addr, c.region_size, c.region_size,
- MREMAP_MAYMOVE|MREMAP_FIXED, (char *) addr);
+ MREMAP_MAYMOVE|MREMAP_FIXED, (char *) addr);
clock_gettime(CLOCK_MONOTONIC, &t_end);
if (dest_addr == MAP_FAILED) {
@@ -193,7 +268,7 @@ static void run_mremap_test_case(struct test test_case, int *failures,
if (remap_time < 0) {
if (test_case.expect_failure)
- ksft_test_result_pass("%s\n\tExpected mremap failure\n",
+ ksft_test_result_xfail("%s\n\tExpected mremap failure\n",
test_case.name);
else {
ksft_test_result_fail("%s\n", test_case.name);
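For context on is_remap_region_valid() above: MAP_FIXED_NOREPLACE (Linux 4.17 and later) behaves like MAP_FIXED but fails with EEXIST rather than silently replacing an existing mapping, which makes it usable as a non-destructive "is this range free?" probe. A standalone sketch of the same probe, with the probe address chosen arbitrarily:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    static bool range_is_free(void *addr, size_t len)
    {
        void *p = mmap(addr, len, PROT_NONE,
                       MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANONYMOUS,
                       -1, 0);

        if (p == MAP_FAILED)
            return errno != EEXIST; /* EEXIST means already mapped */
        munmap(p, len);             /* range was free; undo the probe */
        return true;
    }

    int main(void)
    {
        void *probe = (void *)(uintptr_t)0x700000000000ULL;

        return range_is_free(probe, 4096) ? 0 : 1;
    }

Like the selftest, the sketch treats probe failures other than EEXIST as "free", so only a confirmed overlap rejects the candidate region.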
diff --git a/tools/testing/selftests/vm/run_vmtests.sh b/tools/testing/selftests/vm/run_vmtests.sh
index 3b265f140c25..352ba00cf26b 100755
--- a/tools/testing/selftests/vm/run_vmtests.sh
+++ b/tools/testing/selftests/vm/run_vmtests.sh
@@ -291,11 +291,16 @@ echo "-------------------"
echo "running mremap_test"
echo "-------------------"
./mremap_test
-if [ $? -ne 0 ]; then
+ret_val=$?
+
+if [ $ret_val -eq 0 ]; then
+ echo "[PASS]"
+elif [ $ret_val -eq $ksft_skip ]; then
+ echo "[SKIP]"
+ exitcode=$ksft_skip
+else
echo "[FAIL]"
exitcode=1
-else
- echo "[PASS]"
fi
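The run_vmtests.sh hunk above stops collapsing every nonzero status into FAIL: it now propagates $ksft_skip so skipped tests are reported as SKIP. The kselftest convention reserves exit code 4 for skips; a minimal C illustration of a test emitting it (message text is illustrative):

    #include <stdio.h>
    #include <unistd.h>

    #define KSFT_SKIP 4 /* kselftest harness exit code for "skipped" */

    int main(void)
    {
        if (getuid() != 0) {
            printf("not running as root, skipping\n");
            return KSFT_SKIP; /* runner reports SKIP, not FAIL */
        }
        printf("root-only checks can proceed\n");
        return 0;
    }

This pairs with the mremap_test change above, which exits with KSFT_SKIP when /proc/sys/vm/mmap_min_addr cannot be read.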
echo "-----------------"
diff --git a/tools/testing/selftests/wireguard/qemu/arch/i686.config b/tools/testing/selftests/wireguard/qemu/arch/i686.config
index a85025d7206e..a9b4fe795048 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/i686.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/i686.config
@@ -1,3 +1,4 @@
+CONFIG_ACPI=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_CMDLINE_BOOL=y
diff --git a/tools/testing/selftests/wireguard/qemu/arch/x86_64.config b/tools/testing/selftests/wireguard/qemu/arch/x86_64.config
index 00a1ef4869d5..45dd53a0d760 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/x86_64.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/x86_64.config
@@ -1,3 +1,4 @@
+CONFIG_ACPI=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_CMDLINE_BOOL=y
diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
index 222ecc81d7df..f4c2a6eb1666 100644
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+// SPDX-License-Identifier: GPL-2.0-only
/*
* KVM dirty ring implementation
*
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 70e05af5ebea..6d971fb1b08d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -164,6 +164,10 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
{
}
+__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+{
+}
+
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
{
/*
@@ -357,6 +361,12 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif
+static void kvm_flush_shadow_all(struct kvm *kvm)
+{
+ kvm_arch_flush_shadow_all(kvm);
+ kvm_arch_guest_memory_reclaimed(kvm);
+}
+
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
gfp_t gfp_flags)
@@ -434,8 +444,8 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
- kvm_dirty_ring_free(&vcpu->dirty_ring);
kvm_arch_vcpu_destroy(vcpu);
+ kvm_dirty_ring_free(&vcpu->dirty_ring);
/*
* No need for rcu_read_lock as VCPU_RUN is the only place that changes
@@ -485,12 +495,15 @@ typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
unsigned long end);
+typedef void (*on_unlock_fn_t)(struct kvm *kvm);
+
struct kvm_hva_range {
unsigned long start;
unsigned long end;
pte_t pte;
hva_handler_t handler;
on_lock_fn_t on_lock;
+ on_unlock_fn_t on_unlock;
bool flush_on_ret;
bool may_block;
};
@@ -578,8 +591,11 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
if (range->flush_on_ret && ret)
kvm_flush_remote_tlbs(kvm);
- if (locked)
+ if (locked) {
KVM_MMU_UNLOCK(kvm);
+ if (!IS_KVM_NULL_FN(range->on_unlock))
+ range->on_unlock(kvm);
+ }
srcu_read_unlock(&kvm->srcu, idx);
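The new on_unlock hook follows the file's existing on_lock convention: a sentinel "null function" is stored instead of a NULL pointer, and call sites test IS_KVM_NULL_FN() before making the indirect call. A minimal userspace sketch of the sentinel-callback idiom (all names here are illustrative, not KVM's):

    #include <stdio.h>

    typedef void (*hook_fn)(void *ctx);

    static void null_hook(void *ctx) { (void)ctx; }
    #define IS_NULL_HOOK(fn) ((fn) == null_hook)

    struct range_op {
        hook_fn on_lock;
        hook_fn on_unlock;
    };

    static void run_op(struct range_op *op, void *ctx)
    {
        if (!IS_NULL_HOOK(op->on_lock))
            op->on_lock(ctx);
        /* ... critical section ... */
        if (!IS_NULL_HOOK(op->on_unlock))
            op->on_unlock(ctx);
    }

    int main(void)
    {
        struct range_op op = { null_hook, null_hook };

        run_op(&op, NULL);
        printf("both hooks skipped\n");
        return 0;
    }

Using one sentinel for "no hook" keeps the initializer lists explicit; in the hunks above, only the invalidate_range_start path installs kvm_arch_guest_memory_reclaimed(), while every other path keeps the null function.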
@@ -600,6 +616,7 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
.pte = pte,
.handler = handler,
.on_lock = (void *)kvm_null_fn,
+ .on_unlock = (void *)kvm_null_fn,
.flush_on_ret = true,
.may_block = false,
};
@@ -619,6 +636,7 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
.pte = __pte(0),
.handler = handler,
.on_lock = (void *)kvm_null_fn,
+ .on_unlock = (void *)kvm_null_fn,
.flush_on_ret = false,
.may_block = false,
};
@@ -662,7 +680,7 @@ void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
kvm->mmu_notifier_range_end = end;
} else {
/*
- * Fully tracking multiple concurrent ranges has dimishing
+ * Fully tracking multiple concurrent ranges has diminishing
* returns. Keep things simple and just find the minimal range
* which includes the current and new ranges. As there won't be
* enough information to subtract a range after its invalidate
@@ -687,6 +705,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
.pte = __pte(0),
.handler = kvm_unmap_gfn_range,
.on_lock = kvm_inc_notifier_count,
+ .on_unlock = kvm_arch_guest_memory_reclaimed,
.flush_on_ret = true,
.may_block = mmu_notifier_range_blockable(range),
};
@@ -741,6 +760,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
.pte = __pte(0),
.handler = (void *)kvm_null_fn,
.on_lock = kvm_dec_notifier_count,
+ .on_unlock = (void *)kvm_null_fn,
.flush_on_ret = false,
.may_block = mmu_notifier_range_blockable(range),
};
@@ -813,7 +833,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
int idx;
idx = srcu_read_lock(&kvm->srcu);
- kvm_arch_flush_shadow_all(kvm);
+ kvm_flush_shadow_all(kvm);
srcu_read_unlock(&kvm->srcu, idx);
}
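kvm_flush_shadow_all(), introduced earlier in this file, pairs the arch shadow flush with kvm_arch_guest_memory_reclaimed(), whose default is the empty __weak stub added at the top of the kvm_main.c hunks; only architectures that need extra work on reclaim (the likely consumer being SEV-related cache flushing on x86) provide a strong override. A minimal sketch of the weak-default pattern with GCC/Clang on ELF targets:

    #include <stdio.h>

    /* Weak default: a no-op unless a strong definition overrides it. */
    __attribute__((weak)) void arch_guest_memory_reclaimed(void)
    {
    }

    int main(void)
    {
        arch_guest_memory_reclaimed(); /* resolves to the empty stub */
        printf("hook invoked\n");
        return 0;
    }

In the kernel the attribute is spelled __weak, as in the kvm_arch_guest_memory_reclaimed() stub above.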
@@ -932,7 +952,7 @@ static void kvm_destroy_vm_debugfs(struct kvm *kvm)
int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
kvm_vcpu_stats_header.num_desc;
- if (!kvm->debugfs_dentry)
+ if (IS_ERR(kvm->debugfs_dentry))
return;
debugfs_remove_recursive(kvm->debugfs_dentry);
@@ -1075,6 +1095,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
+ /*
+ * Force subsequent debugfs file creations to fail if the VM directory
+ * is not created (by kvm_create_vm_debugfs()).
+ */
+ kvm->debugfs_dentry = ERR_PTR(-ENOENT);
+
if (init_srcu_struct(&kvm->srcu))
goto out_err_no_srcu;
if (init_srcu_struct(&kvm->irq_srcu))
@@ -1219,7 +1245,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
kvm->mn_active_invalidate_count = 0;
#else
- kvm_arch_flush_shadow_all(kvm);
+ kvm_flush_shadow_all(kvm);
#endif
kvm_arch_destroy_vm(kvm);
kvm_destroy_devices(kvm);
@@ -1646,6 +1672,7 @@ static void kvm_invalidate_memslot(struct kvm *kvm,
* - kvm_is_visible_gfn (mmu_check_root)
*/
kvm_arch_flush_shadow_memslot(kvm, old);
+ kvm_arch_guest_memory_reclaimed(kvm);
/* Was released by kvm_swap_active_memslots, reacquire. */
mutex_lock(&kvm->slots_arch_lock);
@@ -1793,7 +1820,7 @@ static int kvm_set_memslot(struct kvm *kvm,
/*
* No need to refresh new->arch, changes after dropping slots_arch_lock
- * will directly hit the final, active memsot. Architectures are
+ * will directly hit the final, active memslot. Architectures are
* responsible for knowing that new->arch may be stale.
*/
kvm_commit_memory_region(kvm, old, new, change);
@@ -4327,6 +4354,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
return 0;
#endif
case KVM_CAP_BINARY_STATS_FD:
+ case KVM_CAP_SYSTEM_EVENT_DATA:
return 1;
default:
break;
@@ -5479,7 +5507,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
}
add_uevent_var(env, "PID=%d", kvm->userspace_pid);
- if (kvm->debugfs_dentry) {
+ if (!IS_ERR(kvm->debugfs_dentry)) {
char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
if (p) {
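The debugfs hunks above replace NULL checks on kvm->debugfs_dentry with IS_ERR() checks, and kvm_create_vm() now seeds the field with ERR_PTR(-ENOENT) so a VM whose debugfs directory was never created is handled like one whose creation failed. A minimal userspace sketch of the ERR_PTR encoding (the kernel's versions live in <linux/err.h>; these are simplified stand-ins):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095 /* top 4095 addresses encode -errno values */

    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    int main(void)
    {
        void *d = ERR_PTR(-2); /* -2 is -ENOENT */

        if (IS_ERR(d))
            printf("error sentinel: %ld\n", PTR_ERR(d));
        return 0;
    }

The sentinel also explains the kvm_uevent_notify_change() hunk: a dentry that IS_ERR() rejects must not be dereferenced by the path-building code that follows.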
diff --git a/virt/kvm/kvm_mm.h b/virt/kvm/kvm_mm.h
index 34ca40823260..41da467d99c9 100644
--- a/virt/kvm/kvm_mm.h
+++ b/virt/kvm/kvm_mm.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_MM_H__
#define __KVM_MM_H__ 1
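The two SPDX hunks in this tail (virt/kvm/dirty_ring.c and virt/kvm/kvm_mm.h) are opposite fixes for the same rule: the kernel's license-identifier convention is a C++-style comment on the first line of .c files and a C-style comment in headers, since headers can be pulled into contexts where // comments are not accepted. For reference:

    // SPDX-License-Identifier: GPL-2.0-only      (first line of a .c file)

    /* SPDX-License-Identifier: GPL-2.0-only */   (first line of a .h file)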